1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/mathematics.h"
29 #include "libavutil/pixdesc.h"
30 #include "libavutil/imgutils.h"
31 #include "libavutil/dict.h"
32 #include "libavutil/parseutils.h"
33 #include "libavutil/samplefmt.h"
34 #include "libavutil/avassert.h"
35 #include "libavformat/avformat.h"
36 #include "libavdevice/avdevice.h"
37 #include "libswscale/swscale.h"
38 #include "libavcodec/audioconvert.h"
39 #include "libavutil/opt.h"
40 #include "libavcodec/avfft.h"
41
42 #if CONFIG_AVFILTER
43 # include "libavfilter/avcodec.h"
44 # include "libavfilter/avfilter.h"
45 # include "libavfilter/avfiltergraph.h"
46 # include "libavfilter/vsink_buffer.h"
47 #endif
48
49 #include <SDL.h>
50 #include <SDL_thread.h>
51
52 #include "cmdutils.h"
53
54 #include <unistd.h>
55 #include <assert.h>
56
57 const char program_name[] = "ffplay";
58 const int program_birth_year = 2003;
59
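/* Limits used by the read (demuxer) thread to bound how much compressed data
   is buffered ahead of the decoders in the packet queues. */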
60 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
61 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
62 #define MIN_FRAMES 5
63
64 /* SDL audio buffer size, in samples. Should be small to have precise
65    A/V sync as SDL does not have hardware buffer fullness info. */
66 #define SDL_AUDIO_BUFFER_SIZE 1024
67
68 /* no AV sync correction is done if below the AV sync threshold */
69 #define AV_SYNC_THRESHOLD 0.01
70 /* no AV correction is done if too big error */
71 #define AV_NOSYNC_THRESHOLD 10.0
72
73 #define FRAME_SKIP_FACTOR 0.05
74
75 /* maximum audio speed change to get correct sync */
76 #define SAMPLE_CORRECTION_PERCENT_MAX 10
77
78 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
79 #define AUDIO_DIFF_AVG_NB   20
80
81 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
82 #define SAMPLE_ARRAY_SIZE (2*65536)
83
84 static int sws_flags = SWS_BICUBIC;
85
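/* FIFO of demuxed packets shared between the read thread (producer) and one
   decoder thread (consumer). All fields are protected by the SDL mutex, and
   the condition variable is signalled whenever a packet is added or the
   queue is aborted. */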
86 typedef struct PacketQueue {
87     AVPacketList *first_pkt, *last_pkt;
88     int nb_packets;
89     int size;
90     int abort_request;
91     SDL_mutex *mutex;
92     SDL_cond *cond;
93 } PacketQueue;
94
95 #define VIDEO_PICTURE_QUEUE_SIZE 2
96 #define SUBPICTURE_QUEUE_SIZE 4
97
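/* A decoded video frame waiting to be displayed: the pixels live in an SDL
   YUV overlay, together with the timing information used by the refresh
   code (pts, target display time, duration and file position). */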
98 typedef struct VideoPicture {
99     double pts;                                  ///<presentation time stamp for this picture
100     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
101     double duration;                             ///<expected duration of the frame
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterBufferRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
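/* Master clock selection: by default video is slaved to the audio clock;
   alternatively everything can follow the video clock or an external
   wall-clock reference. */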
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *read_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139
140     int audio_stream;
141
142     int av_sync_type;
143     double external_clock; /* external clock base */
144     int64_t external_clock_time;
145
146     double audio_clock;
147     double audio_diff_cum; /* used for AV difference average computation */
148     double audio_diff_avg_coef;
149     double audio_diff_threshold;
150     int audio_diff_avg_count;
151     AVStream *audio_st;
152     PacketQueue audioq;
153     int audio_hw_buf_size;
154     /* samples output by the codec. we reserve more space for avsync
155        compensation */
156     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
157     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     uint8_t *audio_buf;
159     unsigned int audio_buf_size; /* in bytes */
160     int audio_buf_index; /* in bytes */
161     int audio_write_buf_size;
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum AVSampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166     double audio_current_pts;
167     double audio_current_pts_drift;
168
169     enum ShowMode {
170         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
171     } show_mode;
172     int16_t sample_array[SAMPLE_ARRAY_SIZE];
173     int sample_array_index;
174     int last_i_start;
175     RDFTContext *rdft;
176     int rdft_bits;
177     FFTSample *rdft_data;
178     int xpos;
179
180     SDL_Thread *subtitle_tid;
181     int subtitle_stream;
182     int subtitle_stream_changed;
183     AVStream *subtitle_st;
184     PacketQueue subtitleq;
185     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
186     int subpq_size, subpq_rindex, subpq_windex;
187     SDL_mutex *subpq_mutex;
188     SDL_cond *subpq_cond;
189
190     double frame_timer;
191     double frame_last_pts;
192     double frame_last_delay;
193     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
194     int video_stream;
195     AVStream *video_st;
196     PacketQueue videoq;
197     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
198     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
199     int64_t video_current_pos;                   ///<current displayed file pos
200     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
201     int pictq_size, pictq_rindex, pictq_windex;
202     SDL_mutex *pictq_mutex;
203     SDL_cond *pictq_cond;
204 #if !CONFIG_AVFILTER
205     struct SwsContext *img_convert_ctx;
206 #endif
207
208     char filename[1024];
209     int width, height, xleft, ytop;
210
211 #if CONFIG_AVFILTER
212     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
213 #endif
214
215     float skip_frames;
216     float skip_frames_index;
217     int refresh;
218 } VideoState;
219
220 static int opt_help(const char *opt, const char *arg);
221
222 /* options specified by the user */
223 static AVInputFormat *file_iformat;
224 static const char *input_filename;
225 static const char *window_title;
226 static int fs_screen_width;
227 static int fs_screen_height;
228 static int screen_width = 0;
229 static int screen_height = 0;
230 static int audio_disable;
231 static int video_disable;
232 static int wanted_stream[AVMEDIA_TYPE_NB]={
233     [AVMEDIA_TYPE_AUDIO]=-1,
234     [AVMEDIA_TYPE_VIDEO]=-1,
235     [AVMEDIA_TYPE_SUBTITLE]=-1,
236 };
237 static int seek_by_bytes=-1;
238 static int display_disable;
239 static int show_status = 1;
240 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
241 static int64_t start_time = AV_NOPTS_VALUE;
242 static int64_t duration = AV_NOPTS_VALUE;
243 static int step = 0;
244 static int thread_count = 1;
245 static int workaround_bugs = 1;
246 static int fast = 0;
247 static int genpts = 0;
248 static int lowres = 0;
249 static int idct = FF_IDCT_AUTO;
250 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
251 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
252 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
253 static int error_recognition = FF_ER_CAREFUL;
254 static int error_concealment = 3;
255 static int decoder_reorder_pts= -1;
256 static int autoexit;
257 static int exit_on_keydown;
258 static int exit_on_mousedown;
259 static int loop=1;
260 static int framedrop=-1;
261 static enum ShowMode show_mode = SHOW_MODE_NONE;
262
263 static int rdftspeed=20;
264 #if CONFIG_AVFILTER
265 static char *vfilters = NULL;
266 #endif
267
268 /* current context */
269 static int is_full_screen;
270 static VideoState *cur_stream;
271 static int64_t audio_callback_time;
272
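/* Special packet queued after a seek; a decoder that dequeues it calls
   avcodec_flush_buffers() instead of decoding it. */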
273 static AVPacket flush_pkt;
274
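/* Custom SDL events posted to the main event loop: FF_ALLOC_EVENT asks the
   main thread to (re)allocate a video overlay, FF_REFRESH_EVENT requests a
   display refresh, and FF_QUIT_EVENT requests termination. */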
275 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
276 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
277 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
278
279 static SDL_Surface *screen;
280
281 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
282 {
283     AVPacketList *pkt1;
284
285     /* duplicate the packet */
286     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
287         return -1;
288
289     pkt1 = av_malloc(sizeof(AVPacketList));
290     if (!pkt1)
291         return -1;
292     pkt1->pkt = *pkt;
293     pkt1->next = NULL;
294
295
296     SDL_LockMutex(q->mutex);
297
298     if (!q->last_pkt)
299
300         q->first_pkt = pkt1;
301     else
302         q->last_pkt->next = pkt1;
303     q->last_pkt = pkt1;
304     q->nb_packets++;
305     q->size += pkt1->pkt.size + sizeof(*pkt1);
306     /* XXX: should duplicate packet data in DV case */
307     SDL_CondSignal(q->cond);
308
309     SDL_UnlockMutex(q->mutex);
310     return 0;
311 }
312
313 /* packet queue handling */
314 static void packet_queue_init(PacketQueue *q)
315 {
316     memset(q, 0, sizeof(PacketQueue));
317     q->mutex = SDL_CreateMutex();
318     q->cond = SDL_CreateCond();
319     packet_queue_put(q, &flush_pkt);
320 }
321
322 static void packet_queue_flush(PacketQueue *q)
323 {
324     AVPacketList *pkt, *pkt1;
325
326     SDL_LockMutex(q->mutex);
327     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
328         pkt1 = pkt->next;
329         av_free_packet(&pkt->pkt);
330         av_freep(&pkt);
331     }
332     q->last_pkt = NULL;
333     q->first_pkt = NULL;
334     q->nb_packets = 0;
335     q->size = 0;
336     SDL_UnlockMutex(q->mutex);
337 }
338
339 static void packet_queue_end(PacketQueue *q)
340 {
341     packet_queue_flush(q);
342     SDL_DestroyMutex(q->mutex);
343     SDL_DestroyCond(q->cond);
344 }
345
346 static void packet_queue_abort(PacketQueue *q)
347 {
348     SDL_LockMutex(q->mutex);
349
350     q->abort_request = 1;
351
352     SDL_CondSignal(q->cond);
353
354     SDL_UnlockMutex(q->mutex);
355 }
356
357 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
358 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
359 {
360     AVPacketList *pkt1;
361     int ret;
362
363     SDL_LockMutex(q->mutex);
364
365     for(;;) {
366         if (q->abort_request) {
367             ret = -1;
368             break;
369         }
370
371         pkt1 = q->first_pkt;
372         if (pkt1) {
373             q->first_pkt = pkt1->next;
374             if (!q->first_pkt)
375                 q->last_pkt = NULL;
376             q->nb_packets--;
377             q->size -= pkt1->pkt.size + sizeof(*pkt1);
378             *pkt = pkt1->pkt;
379             av_free(pkt1);
380             ret = 1;
381             break;
382         } else if (!block) {
383             ret = 0;
384             break;
385         } else {
386             SDL_CondWait(q->cond, q->mutex);
387         }
388     }
389     SDL_UnlockMutex(q->mutex);
390     return ret;
391 }
392
393 static inline void fill_rectangle(SDL_Surface *screen,
394                                   int x, int y, int w, int h, int color)
395 {
396     SDL_Rect rect;
397     rect.x = x;
398     rect.y = y;
399     rect.w = w;
400     rect.h = h;
401     SDL_FillRect(screen, &rect, color);
402 }
403
404 #define ALPHA_BLEND(a, oldp, newp, s)\
405 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
406
407 #define RGBA_IN(r, g, b, a, s)\
408 {\
409     unsigned int v = ((const uint32_t *)(s))[0];\
410     a = (v >> 24) & 0xff;\
411     r = (v >> 16) & 0xff;\
412     g = (v >> 8) & 0xff;\
413     b = v & 0xff;\
414 }
415
416 #define YUVA_IN(y, u, v, a, s, pal)\
417 {\
418     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
419     a = (val >> 24) & 0xff;\
420     y = (val >> 16) & 0xff;\
421     u = (val >> 8) & 0xff;\
422     v = val & 0xff;\
423 }
424
425 #define YUVA_OUT(d, y, u, v, a)\
426 {\
427     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
428 }
429
430
431 #define BPP 1
432
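/* Alpha-blend one palettized subtitle rectangle (PAL8 pixels with a YUVA
   palette) onto the YUV420P destination picture, averaging the chroma of
   each 2x2 block and taking care of odd offsets and sizes. */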
433 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
434 {
435     int wrap, wrap3, width2, skip2;
436     int y, u, v, a, u1, v1, a1, w, h;
437     uint8_t *lum, *cb, *cr;
438     const uint8_t *p;
439     const uint32_t *pal;
440     int dstx, dsty, dstw, dsth;
441
442     dstw = av_clip(rect->w, 0, imgw);
443     dsth = av_clip(rect->h, 0, imgh);
444     dstx = av_clip(rect->x, 0, imgw - dstw);
445     dsty = av_clip(rect->y, 0, imgh - dsth);
446     lum = dst->data[0] + dsty * dst->linesize[0];
447     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
448     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
449
450     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
451     skip2 = dstx >> 1;
452     wrap = dst->linesize[0];
453     wrap3 = rect->pict.linesize[0];
454     p = rect->pict.data[0];
455     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
456
457     if (dsty & 1) {
458         lum += dstx;
459         cb += skip2;
460         cr += skip2;
461
462         if (dstx & 1) {
463             YUVA_IN(y, u, v, a, p, pal);
464             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
465             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
466             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
467             cb++;
468             cr++;
469             lum++;
470             p += BPP;
471         }
472         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
473             YUVA_IN(y, u, v, a, p, pal);
474             u1 = u;
475             v1 = v;
476             a1 = a;
477             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
478
479             YUVA_IN(y, u, v, a, p + BPP, pal);
480             u1 += u;
481             v1 += v;
482             a1 += a;
483             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
484             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
485             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
486             cb++;
487             cr++;
488             p += 2 * BPP;
489             lum += 2;
490         }
491         if (w) {
492             YUVA_IN(y, u, v, a, p, pal);
493             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
494             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
495             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
496             p++;
497             lum++;
498         }
499         p += wrap3 - dstw * BPP;
500         lum += wrap - dstw - dstx;
501         cb += dst->linesize[1] - width2 - skip2;
502         cr += dst->linesize[2] - width2 - skip2;
503     }
504     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
505         lum += dstx;
506         cb += skip2;
507         cr += skip2;
508
509         if (dstx & 1) {
510             YUVA_IN(y, u, v, a, p, pal);
511             u1 = u;
512             v1 = v;
513             a1 = a;
514             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
515             p += wrap3;
516             lum += wrap;
517             YUVA_IN(y, u, v, a, p, pal);
518             u1 += u;
519             v1 += v;
520             a1 += a;
521             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
522             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
523             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
524             cb++;
525             cr++;
526             p += -wrap3 + BPP;
527             lum += -wrap + 1;
528         }
529         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
530             YUVA_IN(y, u, v, a, p, pal);
531             u1 = u;
532             v1 = v;
533             a1 = a;
534             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
535
536             YUVA_IN(y, u, v, a, p + BPP, pal);
537             u1 += u;
538             v1 += v;
539             a1 += a;
540             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
541             p += wrap3;
542             lum += wrap;
543
544             YUVA_IN(y, u, v, a, p, pal);
545             u1 += u;
546             v1 += v;
547             a1 += a;
548             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
549
550             YUVA_IN(y, u, v, a, p + BPP, pal);
551             u1 += u;
552             v1 += v;
553             a1 += a;
554             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
555
556             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
557             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
558
559             cb++;
560             cr++;
561             p += -wrap3 + 2 * BPP;
562             lum += -wrap + 2;
563         }
564         if (w) {
565             YUVA_IN(y, u, v, a, p, pal);
566             u1 = u;
567             v1 = v;
568             a1 = a;
569             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
570             p += wrap3;
571             lum += wrap;
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 += u;
574             v1 += v;
575             a1 += a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
578             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
579             cb++;
580             cr++;
581             p += -wrap3 + BPP;
582             lum += -wrap + 1;
583         }
584         p += wrap3 + (wrap3 - dstw * BPP);
585         lum += wrap + (wrap - dstw - dstx);
586         cb += dst->linesize[1] - width2 - skip2;
587         cr += dst->linesize[2] - width2 - skip2;
588     }
589     /* handle odd height */
590     if (h) {
591         lum += dstx;
592         cb += skip2;
593         cr += skip2;
594
595         if (dstx & 1) {
596             YUVA_IN(y, u, v, a, p, pal);
597             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
598             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
599             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
600             cb++;
601             cr++;
602             lum++;
603             p += BPP;
604         }
605         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
606             YUVA_IN(y, u, v, a, p, pal);
607             u1 = u;
608             v1 = v;
609             a1 = a;
610             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
611
612             YUVA_IN(y, u, v, a, p + BPP, pal);
613             u1 += u;
614             v1 += v;
615             a1 += a;
616             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
617             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
618             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
619             cb++;
620             cr++;
621             p += 2 * BPP;
622             lum += 2;
623         }
624         if (w) {
625             YUVA_IN(y, u, v, a, p, pal);
626             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
627             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
628             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
629         }
630     }
631 }
632
633 static void free_subpicture(SubPicture *sp)
634 {
635     avsubtitle_free(&sp->sub);
636 }
637
638 static void video_image_display(VideoState *is)
639 {
640     VideoPicture *vp;
641     SubPicture *sp;
642     AVPicture pict;
643     float aspect_ratio;
644     int width, height, x, y;
645     SDL_Rect rect;
646     int i;
647
648     vp = &is->pictq[is->pictq_rindex];
649     if (vp->bmp) {
650 #if CONFIG_AVFILTER
651          if (vp->picref->video->sample_aspect_ratio.num == 0)
652              aspect_ratio = 0;
653          else
654              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
655 #else
656
657         /* XXX: use the sample aspect ratio stored in the frame */
658         if (is->video_st->sample_aspect_ratio.num)
659             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
660         else if (is->video_st->codec->sample_aspect_ratio.num)
661             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
662         else
663             aspect_ratio = 0;
664 #endif
665         if (aspect_ratio <= 0.0)
666             aspect_ratio = 1.0;
667         aspect_ratio *= (float)vp->width / (float)vp->height;
668
669         if (is->subtitle_st) {
670             if (is->subpq_size > 0) {
671                 sp = &is->subpq[is->subpq_rindex];
672
673                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
674                     SDL_LockYUVOverlay (vp->bmp);
675
676                     pict.data[0] = vp->bmp->pixels[0];
677                     pict.data[1] = vp->bmp->pixels[2];
678                     pict.data[2] = vp->bmp->pixels[1];
679
680                     pict.linesize[0] = vp->bmp->pitches[0];
681                     pict.linesize[1] = vp->bmp->pitches[2];
682                     pict.linesize[2] = vp->bmp->pitches[1];
683
684                     for (i = 0; i < sp->sub.num_rects; i++)
685                         blend_subrect(&pict, sp->sub.rects[i],
686                                       vp->bmp->w, vp->bmp->h);
687
688                     SDL_UnlockYUVOverlay (vp->bmp);
689                 }
690             }
691         }
692
693
694         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
695         height = is->height;
696         width = ((int)rint(height * aspect_ratio)) & ~1;
697         if (width > is->width) {
698             width = is->width;
699             height = ((int)rint(width / aspect_ratio)) & ~1;
700         }
701         x = (is->width - width) / 2;
702         y = (is->height - height) / 2;
703         is->no_background = 0;
704         rect.x = is->xleft + x;
705         rect.y = is->ytop  + y;
706         rect.w = FFMAX(width,  1);
707         rect.h = FFMAX(height, 1);
708         SDL_DisplayYUVOverlay(vp->bmp, &rect);
709     }
710 }
711
712 static inline int compute_mod(int a, int b)
713 {
714     return a < 0 ? a%b + b : a%b;
715 }
716
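/* Audio-only visualization: draw either an oscilloscope-style waveform of
   the most recently played samples (SHOW_MODE_WAVES) or a scrolling
   spectrum computed with a real DFT (SHOW_MODE_RDFT). The display window is
   centered on the samples currently being output, estimated from the audio
   write buffer fill level and the time elapsed since the last callback. */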
717 static void video_audio_display(VideoState *s)
718 {
719     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
720     int ch, channels, h, h2, bgcolor, fgcolor;
721     int64_t time_diff;
722     int rdft_bits, nb_freq;
723
724     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
725         ;
726     nb_freq= 1<<(rdft_bits-1);
727
728     /* compute display index : center on currently output samples */
729     channels = s->audio_st->codec->channels;
730     nb_display_channels = channels;
731     if (!s->paused) {
732         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
733         n = 2 * channels;
734         delay = s->audio_write_buf_size;
735         delay /= n;
736
737         /* to be more precise, we take into account the time spent since
738            the last buffer computation */
739         if (audio_callback_time) {
740             time_diff = av_gettime() - audio_callback_time;
741             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
742         }
743
744         delay += 2*data_used;
745         if (delay < data_used)
746             delay = data_used;
747
748         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
749         if (s->show_mode == SHOW_MODE_WAVES) {
750             h= INT_MIN;
751             for(i=0; i<1000; i+=channels){
752                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
753                 int a= s->sample_array[idx];
754                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
755                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
756                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
757                 int score= a-d;
758                 if(h<score && (b^c)<0){
759                     h= score;
760                     i_start= idx;
761                 }
762             }
763         }
764
765         s->last_i_start = i_start;
766     } else {
767         i_start = s->last_i_start;
768     }
769
770     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
771     if (s->show_mode == SHOW_MODE_WAVES) {
772         fill_rectangle(screen,
773                        s->xleft, s->ytop, s->width, s->height,
774                        bgcolor);
775
776         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
777
778         /* total height for one channel */
779         h = s->height / nb_display_channels;
780         /* graph height / 2 */
781         h2 = (h * 9) / 20;
782         for(ch = 0;ch < nb_display_channels; ch++) {
783             i = i_start + ch;
784             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
785             for(x = 0; x < s->width; x++) {
786                 y = (s->sample_array[i] * h2) >> 15;
787                 if (y < 0) {
788                     y = -y;
789                     ys = y1 - y;
790                 } else {
791                     ys = y1;
792                 }
793                 fill_rectangle(screen,
794                                s->xleft + x, ys, 1, y,
795                                fgcolor);
796                 i += channels;
797                 if (i >= SAMPLE_ARRAY_SIZE)
798                     i -= SAMPLE_ARRAY_SIZE;
799             }
800         }
801
802         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
803
804         for(ch = 1;ch < nb_display_channels; ch++) {
805             y = s->ytop + ch * h;
806             fill_rectangle(screen,
807                            s->xleft, y, s->width, 1,
808                            fgcolor);
809         }
810         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
811     }else{
812         nb_display_channels= FFMIN(nb_display_channels, 2);
813         if(rdft_bits != s->rdft_bits){
814             av_rdft_end(s->rdft);
815             av_free(s->rdft_data);
816             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
817             s->rdft_bits= rdft_bits;
818             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
819         }
820         {
821             FFTSample *data[2];
822             for(ch = 0;ch < nb_display_channels; ch++) {
823                 data[ch] = s->rdft_data + 2*nb_freq*ch;
824                 i = i_start + ch;
825                 for(x = 0; x < 2*nb_freq; x++) {
826                     double w= (x-nb_freq)*(1.0/nb_freq);
827                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
828                     i += channels;
829                     if (i >= SAMPLE_ARRAY_SIZE)
830                         i -= SAMPLE_ARRAY_SIZE;
831                 }
832                 av_rdft_calc(s->rdft, data[ch]);
833             }
834             // least efficient way to do this; we should access the data directly, but it is more than fast enough
835             for(y=0; y<s->height; y++){
836                 double w= 1/sqrt(nb_freq);
837                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
838                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
839                        + data[1][2*y+1]*data[1][2*y+1])) : a;
840                 a= FFMIN(a,255);
841                 b= FFMIN(b,255);
842                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
843
844                 fill_rectangle(screen,
845                             s->xpos, s->height-y, 1, 1,
846                             fgcolor);
847             }
848         }
849         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
850         s->xpos++;
851         if(s->xpos >= s->width)
852             s->xpos= s->xleft;
853     }
854 }
855
856 static void stream_close(VideoState *is)
857 {
858     VideoPicture *vp;
859     int i;
860     /* XXX: use a special url_shutdown call to abort parse cleanly */
861     is->abort_request = 1;
862     SDL_WaitThread(is->read_tid, NULL);
863     SDL_WaitThread(is->refresh_tid, NULL);
864
865     /* free all pictures */
866     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
867         vp = &is->pictq[i];
868 #if CONFIG_AVFILTER
869         if (vp->picref) {
870             avfilter_unref_buffer(vp->picref);
871             vp->picref = NULL;
872         }
873 #endif
874         if (vp->bmp) {
875             SDL_FreeYUVOverlay(vp->bmp);
876             vp->bmp = NULL;
877         }
878     }
879     SDL_DestroyMutex(is->pictq_mutex);
880     SDL_DestroyCond(is->pictq_cond);
881     SDL_DestroyMutex(is->subpq_mutex);
882     SDL_DestroyCond(is->subpq_cond);
883 #if !CONFIG_AVFILTER
884     if (is->img_convert_ctx)
885         sws_freeContext(is->img_convert_ctx);
886 #endif
887     av_free(is);
888 }
889
890 static void do_exit(void)
891 {
892     if (cur_stream) {
893         stream_close(cur_stream);
894         cur_stream = NULL;
895     }
896     uninit_opts();
897 #if CONFIG_AVFILTER
898     avfilter_uninit();
899 #endif
900     if (show_status)
901         printf("\n");
902     SDL_Quit();
903     av_log(NULL, AV_LOG_QUIET, "%s", "");
904     exit(0);
905 }
906
907 static int video_open(VideoState *is){
908     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
909     int w,h;
910
911     if(is_full_screen) flags |= SDL_FULLSCREEN;
912     else               flags |= SDL_RESIZABLE;
913
914     if (is_full_screen && fs_screen_width) {
915         w = fs_screen_width;
916         h = fs_screen_height;
917     } else if(!is_full_screen && screen_width){
918         w = screen_width;
919         h = screen_height;
920 #if CONFIG_AVFILTER
921     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
922         w = is->out_video_filter->inputs[0]->w;
923         h = is->out_video_filter->inputs[0]->h;
924 #else
925     }else if (is->video_st && is->video_st->codec->width){
926         w = is->video_st->codec->width;
927         h = is->video_st->codec->height;
928 #endif
929     } else {
930         w = 640;
931         h = 480;
932     }
933     if(screen && is->width == screen->w && screen->w == w
934        && is->height== screen->h && screen->h == h)
935         return 0;
936
937 #ifndef __APPLE__
938     screen = SDL_SetVideoMode(w, h, 0, flags);
939 #else
940     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
941     screen = SDL_SetVideoMode(w, h, 24, flags);
942 #endif
943     if (!screen) {
944         fprintf(stderr, "SDL: could not set video mode - exiting\n");
945         do_exit();
946     }
947     if (!window_title)
948         window_title = input_filename;
949     SDL_WM_SetCaption(window_title, window_title);
950
951     is->width = screen->w;
952     is->height = screen->h;
953
954     return 0;
955 }
956
957 /* display the current picture, if any */
958 static void video_display(VideoState *is)
959 {
960     if(!screen)
961         video_open(cur_stream);
962     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
963         video_audio_display(is);
964     else if (is->video_st)
965         video_image_display(is);
966 }
967
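/* Periodically push FF_REFRESH_EVENT to the main loop so that
   video_refresh() gets called; the interval is rdftspeed milliseconds while
   the audio visualization is shown and 5 ms otherwise. */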
968 static int refresh_thread(void *opaque)
969 {
970     VideoState *is= opaque;
971     while(!is->abort_request){
972         SDL_Event event;
973         event.type = FF_REFRESH_EVENT;
974         event.user.data1 = opaque;
975         if(!is->refresh){
976             is->refresh=1;
977             SDL_PushEvent(&event);
978         }
979         // FIXME: ideally we should wait for the correct time, but SDL's event passing is so slow that it would be pointless
980         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
981     }
982     return 0;
983 }
984
985 /* get the current audio clock value */
986 static double get_audio_clock(VideoState *is)
987 {
988     if (is->paused) {
989         return is->audio_current_pts;
990     } else {
991         return is->audio_current_pts_drift + av_gettime() / 1000000.0;
992     }
993 }
994
995 /* get the current video clock value */
996 static double get_video_clock(VideoState *is)
997 {
998     if (is->paused) {
999         return is->video_current_pts;
1000     } else {
1001         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1002     }
1003 }
1004
1005 /* get the current external clock value */
1006 static double get_external_clock(VideoState *is)
1007 {
1008     int64_t ti;
1009     ti = av_gettime();
1010     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1011 }
1012
1013 /* get the current master clock value */
1014 static double get_master_clock(VideoState *is)
1015 {
1016     double val;
1017
1018     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1019         if (is->video_st)
1020             val = get_video_clock(is);
1021         else
1022             val = get_audio_clock(is);
1023     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1024         if (is->audio_st)
1025             val = get_audio_clock(is);
1026         else
1027             val = get_video_clock(is);
1028     } else {
1029         val = get_external_clock(is);
1030     }
1031     return val;
1032 }
1033
1034 /* seek in the stream */
1035 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1036 {
1037     if (!is->seek_req) {
1038         is->seek_pos = pos;
1039         is->seek_rel = rel;
1040         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1041         if (seek_by_bytes)
1042             is->seek_flags |= AVSEEK_FLAG_BYTE;
1043         is->seek_req = 1;
1044     }
1045 }
1046
1047 /* pause or resume the video */
1048 static void stream_toggle_pause(VideoState *is)
1049 {
1050     if (is->paused) {
1051         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1052         if(is->read_pause_return != AVERROR(ENOSYS)){
1053             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1054         }
1055         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1056     }
1057     is->paused = !is->paused;
1058 }
1059
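/* Return the absolute time (seconds, on the av_gettime() timebase) at which
   the frame with the given PTS should be displayed. When video is not the
   master clock, the nominal frame delay is adjusted: frames that are late
   get a zero delay and frames that are early get their delay doubled. */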
1060 static double compute_target_time(double frame_current_pts, VideoState *is)
1061 {
1062     double delay, sync_threshold, diff;
1063
1064     /* compute nominal delay */
1065     delay = frame_current_pts - is->frame_last_pts;
1066     if (delay <= 0 || delay >= 10.0) {
1067         /* if incorrect delay, use previous one */
1068         delay = is->frame_last_delay;
1069     } else {
1070         is->frame_last_delay = delay;
1071     }
1072     is->frame_last_pts = frame_current_pts;
1073
1074     /* update delay to follow master synchronisation source */
1075     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1076          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1077         /* if video is slave, we try to correct big delays by
1078            duplicating or deleting a frame */
1079         diff = get_video_clock(is) - get_master_clock(is);
1080
1081         /* skip or repeat frame. We take into account the
1082            delay to compute the threshold. I still don't know
1083            if it is the best guess */
1084         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1085         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1086             if (diff <= -sync_threshold)
1087                 delay = 0;
1088             else if (diff >= sync_threshold)
1089                 delay = 2 * delay;
1090         }
1091     }
1092     is->frame_timer += delay;
1093
1094     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1095             delay, frame_current_pts, -diff);
1096
1097     return is->frame_timer;
1098 }
1099
1100 /* called to display each frame */
1101 static void video_refresh(void *opaque)
1102 {
1103     VideoState *is = opaque;
1104     VideoPicture *vp;
1105
1106     SubPicture *sp, *sp2;
1107
1108     if (is->video_st) {
1109 retry:
1110         if (is->pictq_size == 0) {
1111             // nothing to do, no picture to display in the queue
1112         } else {
1113             double time= av_gettime()/1000000.0;
1114             double next_target;
1115             /* dequeue the picture */
1116             vp = &is->pictq[is->pictq_rindex];
1117
1118             if(time < vp->target_clock)
1119                 return;
1120             /* update current video pts */
1121             is->video_current_pts = vp->pts;
1122             is->video_current_pts_drift = is->video_current_pts - time;
1123             is->video_current_pos = vp->pos;
1124             if(is->pictq_size > 1){
1125                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1126                 assert(nextvp->target_clock >= vp->target_clock);
1127                 next_target= nextvp->target_clock;
1128             }else{
1129                 next_target= vp->target_clock + vp->duration;
1130             }
1131             if((framedrop>0 || (framedrop && is->audio_st)) && time > next_target){
1132                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1133                 if(is->pictq_size > 1 || time > next_target + 0.5){
1134                     /* update queue size and signal for next picture */
1135                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1136                         is->pictq_rindex = 0;
1137
1138                     SDL_LockMutex(is->pictq_mutex);
1139                     is->pictq_size--;
1140                     SDL_CondSignal(is->pictq_cond);
1141                     SDL_UnlockMutex(is->pictq_mutex);
1142                     goto retry;
1143                 }
1144             }
1145
1146             if(is->subtitle_st) {
1147                 if (is->subtitle_stream_changed) {
1148                     SDL_LockMutex(is->subpq_mutex);
1149
1150                     while (is->subpq_size) {
1151                         free_subpicture(&is->subpq[is->subpq_rindex]);
1152
1153                         /* update queue size and signal for next picture */
1154                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1155                             is->subpq_rindex = 0;
1156
1157                         is->subpq_size--;
1158                     }
1159                     is->subtitle_stream_changed = 0;
1160
1161                     SDL_CondSignal(is->subpq_cond);
1162                     SDL_UnlockMutex(is->subpq_mutex);
1163                 } else {
1164                     if (is->subpq_size > 0) {
1165                         sp = &is->subpq[is->subpq_rindex];
1166
1167                         if (is->subpq_size > 1)
1168                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1169                         else
1170                             sp2 = NULL;
1171
1172                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1173                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1174                         {
1175                             free_subpicture(sp);
1176
1177                             /* update queue size and signal for next picture */
1178                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1179                                 is->subpq_rindex = 0;
1180
1181                             SDL_LockMutex(is->subpq_mutex);
1182                             is->subpq_size--;
1183                             SDL_CondSignal(is->subpq_cond);
1184                             SDL_UnlockMutex(is->subpq_mutex);
1185                         }
1186                     }
1187                 }
1188             }
1189
1190             /* display picture */
1191             if (!display_disable)
1192                 video_display(is);
1193
1194             /* update queue size and signal for next picture */
1195             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1196                 is->pictq_rindex = 0;
1197
1198             SDL_LockMutex(is->pictq_mutex);
1199             is->pictq_size--;
1200             SDL_CondSignal(is->pictq_cond);
1201             SDL_UnlockMutex(is->pictq_mutex);
1202         }
1203     } else if (is->audio_st) {
1204         /* draw the next audio frame */
1205
1206         /* if there is only an audio stream, then display the audio bars (better
1207            than nothing, just to test the implementation) */
1208
1209         /* display picture */
1210         if (!display_disable)
1211             video_display(is);
1212     }
1213     if (show_status) {
1214         static int64_t last_time;
1215         int64_t cur_time;
1216         int aqsize, vqsize, sqsize;
1217         double av_diff;
1218
1219         cur_time = av_gettime();
1220         if (!last_time || (cur_time - last_time) >= 30000) {
1221             aqsize = 0;
1222             vqsize = 0;
1223             sqsize = 0;
1224             if (is->audio_st)
1225                 aqsize = is->audioq.size;
1226             if (is->video_st)
1227                 vqsize = is->videoq.size;
1228             if (is->subtitle_st)
1229                 sqsize = is->subtitleq.size;
1230             av_diff = 0;
1231             if (is->audio_st && is->video_st)
1232                 av_diff = get_audio_clock(is) - get_video_clock(is);
1233             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1234                    get_master_clock(is),
1235                    av_diff,
1236                    FFMAX(is->skip_frames-1, 0),
1237                    aqsize / 1024,
1238                    vqsize / 1024,
1239                    sqsize,
1240                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1241                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1242             fflush(stdout);
1243             last_time = cur_time;
1244         }
1245     }
1246 }
1247
1248 /* allocate a picture (this needs to be done in the main thread to avoid
1249    potential locking problems) */
1250 static void alloc_picture(void *opaque)
1251 {
1252     VideoState *is = opaque;
1253     VideoPicture *vp;
1254
1255     vp = &is->pictq[is->pictq_windex];
1256
1257     if (vp->bmp)
1258         SDL_FreeYUVOverlay(vp->bmp);
1259
1260 #if CONFIG_AVFILTER
1261     if (vp->picref)
1262         avfilter_unref_buffer(vp->picref);
1263     vp->picref = NULL;
1264
1265     vp->width   = is->out_video_filter->inputs[0]->w;
1266     vp->height  = is->out_video_filter->inputs[0]->h;
1267     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1268 #else
1269     vp->width   = is->video_st->codec->width;
1270     vp->height  = is->video_st->codec->height;
1271     vp->pix_fmt = is->video_st->codec->pix_fmt;
1272 #endif
1273
1274     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1275                                    SDL_YV12_OVERLAY,
1276                                    screen);
1277     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1278         /* SDL allocates a buffer smaller than requested if the video
1279          * overlay hardware is unable to support the requested size. */
1280         fprintf(stderr, "Error: the video system does not support an image\n"
1281                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1282                         "to reduce the image size.\n", vp->width, vp->height );
1283         do_exit();
1284     }
1285
1286     SDL_LockMutex(is->pictq_mutex);
1287     vp->allocated = 1;
1288     SDL_CondSignal(is->pictq_cond);
1289     SDL_UnlockMutex(is->pictq_mutex);
1290 }
1291
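/* Add a decoded frame to the picture queue: derive its PTS from the video
   clock when the stream does not provide one, wait until a free queue slot
   and a correctly sized overlay are available, copy or convert the frame
   into the overlay, and stamp it with its target display time. */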
1292 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1293 {
1294     VideoPicture *vp;
1295     double frame_delay, pts = pts1;
1296
1297     /* compute the exact PTS for the picture if it is omitted in the stream
1298      * pts1 is the dts of the pkt / pts of the frame */
1299     if (pts != 0) {
1300         /* update video clock with pts, if present */
1301         is->video_clock = pts;
1302     } else {
1303         pts = is->video_clock;
1304     }
1305     /* update video clock for next frame */
1306     frame_delay = av_q2d(is->video_st->codec->time_base);
1307     /* for MPEG2, the frame can be repeated, so we update the
1308        clock accordingly */
1309     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1310     is->video_clock += frame_delay;
1311
1312 #if defined(DEBUG_SYNC) && 0
1313     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1314            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1315 #endif
1316
1317     /* wait until we have space to put a new picture */
1318     SDL_LockMutex(is->pictq_mutex);
1319
1320     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1321         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1322
1323     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1324            !is->videoq.abort_request) {
1325         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1326     }
1327     SDL_UnlockMutex(is->pictq_mutex);
1328
1329     if (is->videoq.abort_request)
1330         return -1;
1331
1332     vp = &is->pictq[is->pictq_windex];
1333
1334     vp->duration = frame_delay;
1335
1336     /* alloc or resize hardware picture buffer */
1337     if (!vp->bmp ||
1338 #if CONFIG_AVFILTER
1339         vp->width  != is->out_video_filter->inputs[0]->w ||
1340         vp->height != is->out_video_filter->inputs[0]->h) {
1341 #else
1342         vp->width != is->video_st->codec->width ||
1343         vp->height != is->video_st->codec->height) {
1344 #endif
1345         SDL_Event event;
1346
1347         vp->allocated = 0;
1348
1349         /* the allocation must be done in the main thread to avoid
1350            locking problems */
1351         event.type = FF_ALLOC_EVENT;
1352         event.user.data1 = is;
1353         SDL_PushEvent(&event);
1354
1355         /* wait until the picture is allocated */
1356         SDL_LockMutex(is->pictq_mutex);
1357         while (!vp->allocated && !is->videoq.abort_request) {
1358             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1359         }
1360         SDL_UnlockMutex(is->pictq_mutex);
1361
1362         if (is->videoq.abort_request)
1363             return -1;
1364     }
1365
1366     /* if the frame is not skipped, then display it */
1367     if (vp->bmp) {
1368         AVPicture pict;
1369 #if CONFIG_AVFILTER
1370         if(vp->picref)
1371             avfilter_unref_buffer(vp->picref);
1372         vp->picref = src_frame->opaque;
1373 #endif
1374
1375         /* get a pointer to the bitmap */
1376         SDL_LockYUVOverlay (vp->bmp);
1377
1378         memset(&pict,0,sizeof(AVPicture));
1379         pict.data[0] = vp->bmp->pixels[0];
1380         pict.data[1] = vp->bmp->pixels[2];
1381         pict.data[2] = vp->bmp->pixels[1];
1382
1383         pict.linesize[0] = vp->bmp->pitches[0];
1384         pict.linesize[1] = vp->bmp->pitches[2];
1385         pict.linesize[2] = vp->bmp->pitches[1];
1386
1387 #if CONFIG_AVFILTER
1388         //FIXME use direct rendering
1389         av_picture_copy(&pict, (AVPicture *)src_frame,
1390                         vp->pix_fmt, vp->width, vp->height);
1391 #else
1392         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1393         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1394             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1395             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1396         if (is->img_convert_ctx == NULL) {
1397             fprintf(stderr, "Cannot initialize the conversion context\n");
1398             exit(1);
1399         }
1400         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1401                   0, vp->height, pict.data, pict.linesize);
1402 #endif
1403         /* update the bitmap content */
1404         SDL_UnlockYUVOverlay(vp->bmp);
1405
1406         vp->pts = pts;
1407         vp->pos = pos;
1408
1409         /* now we can update the picture count */
1410         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1411             is->pictq_windex = 0;
1412         SDL_LockMutex(is->pictq_mutex);
1413         vp->target_clock= compute_target_time(vp->pts, is);
1414
1415         is->pictq_size++;
1416         SDL_UnlockMutex(is->pictq_mutex);
1417     }
1418     return 0;
1419 }
1420
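/* Dequeue and decode the next video packet. Returns 1 when a frame that
   should be displayed was produced, 0 when no frame is available (or it is
   being dropped for A/V sync), and -1 when the queue was aborted. A flush
   packet resets the decoder and the frame timing state instead. */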
1421 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1422 {
1423     int got_picture, i;
1424
1425     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1426         return -1;
1427
1428     if (pkt->data == flush_pkt.data) {
1429         avcodec_flush_buffers(is->video_st->codec);
1430
1431         SDL_LockMutex(is->pictq_mutex);
1432         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that is harder)
1433         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1434             is->pictq[i].target_clock= 0;
1435         }
1436         while (is->pictq_size && !is->videoq.abort_request) {
1437             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1438         }
1439         is->video_current_pos = -1;
1440         SDL_UnlockMutex(is->pictq_mutex);
1441
1442         is->frame_last_pts = AV_NOPTS_VALUE;
1443         is->frame_last_delay = 0;
1444         is->frame_timer = (double)av_gettime() / 1000000.0;
1445         is->skip_frames = 1;
1446         is->skip_frames_index = 0;
1447         return 0;
1448     }
1449
1450     avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);
1451
1452     if (got_picture) {
1453         if (decoder_reorder_pts == -1) {
1454             *pts = frame->best_effort_timestamp;
1455         } else if (decoder_reorder_pts) {
1456             *pts = frame->pkt_pts;
1457         } else {
1458             *pts = frame->pkt_dts;
1459         }
1460
1461         if (*pts == AV_NOPTS_VALUE) {
1462             *pts = 0;
1463         }
1464
1465         is->skip_frames_index += 1;
1466         if(is->skip_frames_index >= is->skip_frames){
1467             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1468             return 1;
1469         }
1470
1471     }
1472     return 0;
1473 }
1474
1475 #if CONFIG_AVFILTER
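/* libavfilter source: pulls decoded frames from the video packet queue and
   injects them into the filter graph. When the decoder supports direct
   rendering (CODEC_CAP_DR1), the get/release/reget buffer callbacks below
   let it decode straight into filter buffers, avoiding the copy in
   input_request_frame(). */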
1476 typedef struct {
1477     VideoState *is;
1478     AVFrame *frame;
1479     int use_dr1;
1480 } FilterPriv;
1481
1482 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1483 {
1484     AVFilterContext *ctx = codec->opaque;
1485     AVFilterBufferRef  *ref;
1486     int perms = AV_PERM_WRITE;
1487     int i, w, h, stride[4];
1488     unsigned edge;
1489     int pixel_size;
1490
1491     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1492
1493     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1494         perms |= AV_PERM_NEG_LINESIZES;
1495
1496     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1497         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1498         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1499         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1500     }
1501     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1502
1503     w = codec->width;
1504     h = codec->height;
1505
1506     if(av_image_check_size(w, h, 0, codec))
1507         return -1;
1508
1509     avcodec_align_dimensions2(codec, &w, &h, stride);
1510     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1511     w += edge << 1;
1512     h += edge << 1;
1513
1514     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1515         return -1;
1516
1517     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1518     ref->video->w = codec->width;
1519     ref->video->h = codec->height;
1520     for(i = 0; i < 4; i ++) {
1521         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1522         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1523
1524         if (ref->data[i]) {
1525             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1526         }
1527         pic->data[i]     = ref->data[i];
1528         pic->linesize[i] = ref->linesize[i];
1529     }
1530     pic->opaque = ref;
1531     pic->age    = INT_MAX;
1532     pic->type   = FF_BUFFER_TYPE_USER;
1533     pic->reordered_opaque = codec->reordered_opaque;
1534     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1535     else           pic->pkt_pts = AV_NOPTS_VALUE;
1536     return 0;
1537 }
1538
1539 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1540 {
1541     memset(pic->data, 0, sizeof(pic->data));
1542     avfilter_unref_buffer(pic->opaque);
1543 }
1544
1545 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1546 {
1547     AVFilterBufferRef *ref = pic->opaque;
1548
1549     if (pic->data[0] == NULL) {
1550         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1551         return codec->get_buffer(codec, pic);
1552     }
1553
1554     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1555         (codec->pix_fmt != ref->format)) {
1556         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1557         return -1;
1558     }
1559
1560     pic->reordered_opaque = codec->reordered_opaque;
1561     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1562     else           pic->pkt_pts = AV_NOPTS_VALUE;
1563     return 0;
1564 }
1565
1566 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1567 {
1568     FilterPriv *priv = ctx->priv;
1569     AVCodecContext *codec;
1570     if(!opaque) return -1;
1571
1572     priv->is = opaque;
1573     codec    = priv->is->video_st->codec;
1574     codec->opaque = ctx;
1575     if((codec->codec->capabilities & CODEC_CAP_DR1)
1576     ) {
1577         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1578         priv->use_dr1 = 1;
1579         codec->get_buffer     = input_get_buffer;
1580         codec->release_buffer = input_release_buffer;
1581         codec->reget_buffer   = input_reget_buffer;
1582         codec->thread_safe_callbacks = 1;
1583     }
1584
1585     priv->frame = avcodec_alloc_frame();
1586
1587     return 0;
1588 }
1589
1590 static void input_uninit(AVFilterContext *ctx)
1591 {
1592     FilterPriv *priv = ctx->priv;
1593     av_free(priv->frame);
1594 }
1595
1596 static int input_request_frame(AVFilterLink *link)
1597 {
1598     FilterPriv *priv = link->src->priv;
1599     AVFilterBufferRef *picref;
1600     int64_t pts = 0;
1601     AVPacket pkt;
1602     int ret;
1603
1604     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1605         av_free_packet(&pkt);
1606     if (ret < 0)
1607         return -1;
1608
1609     if(priv->use_dr1 && priv->frame->opaque) {
1610         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1611     } else {
1612         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1613         av_image_copy(picref->data, picref->linesize,
1614                       priv->frame->data, priv->frame->linesize,
1615                       picref->format, link->w, link->h);
1616     }
1617     av_free_packet(&pkt);
1618
1619     avfilter_copy_frame_props(picref, priv->frame);
1620     picref->pts = pts;
1621
1622     avfilter_start_frame(link, picref);
1623     avfilter_draw_slice(link, 0, link->h, 1);
1624     avfilter_end_frame(link);
1625
1626     return 0;
1627 }
1628
1629 static int input_query_formats(AVFilterContext *ctx)
1630 {
1631     FilterPriv *priv = ctx->priv;
1632     enum PixelFormat pix_fmts[] = {
1633         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1634     };
1635
1636     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1637     return 0;
1638 }
1639
1640 static int input_config_props(AVFilterLink *link)
1641 {
1642     FilterPriv *priv  = link->src->priv;
1643     AVCodecContext *c = priv->is->video_st->codec;
1644     AVStream *s = priv->is->video_st;
1645
1646     link->w = c->width;
1647     link->h = c->height;
1648     link->sample_aspect_ratio = s->sample_aspect_ratio.num ?
1649         s->sample_aspect_ratio : c->sample_aspect_ratio;
1650     link->time_base = priv->is->video_st->time_base;
1651
1652     return 0;
1653 }
1654
1655 static AVFilter input_filter =
1656 {
1657     .name      = "ffplay_input",
1658
1659     .priv_size = sizeof(FilterPriv),
1660
1661     .init      = input_init,
1662     .uninit    = input_uninit,
1663
1664     .query_formats = input_query_formats,
1665
1666     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1667     .outputs   = (AVFilterPad[]) {{ .name = "default",
1668                                     .type = AVMEDIA_TYPE_VIDEO,
1669                                     .request_frame = input_request_frame,
1670                                     .config_props  = input_config_props, },
1671                                   { .name = NULL }},
1672 };
1673
1674 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1675 {
1676     char sws_flags_str[128];
1677     int ret;
1678     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1679     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1680     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1681     graph->scale_sws_opts = av_strdup(sws_flags_str);
1682
1683     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1684                                             NULL, is, graph)) < 0)
1685         return ret;
1686     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1687                                             NULL, pix_fmts, graph)) < 0)
1688         return ret;
1689
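    /* splice the user-supplied filter chain between the ffplay source and the sink,
       or link the two directly when no filters were requested */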
1690     if(vfilters) {
1691         AVFilterInOut *outputs = avfilter_inout_alloc();
1692         AVFilterInOut *inputs  = avfilter_inout_alloc();
1693
1694         outputs->name    = av_strdup("in");
1695         outputs->filter_ctx = filt_src;
1696         outputs->pad_idx = 0;
1697         outputs->next    = NULL;
1698
1699         inputs->name    = av_strdup("out");
1700         inputs->filter_ctx = filt_out;
1701         inputs->pad_idx = 0;
1702         inputs->next    = NULL;
1703
1704         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1705             return ret;
1706         av_freep(&vfilters);
1707     } else {
1708         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1709             return ret;
1710     }
1711
1712     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1713         return ret;
1714
1715     is->out_video_filter = filt_out;
1716
1717     return ret;
1718 }
1719
1720 #endif  /* CONFIG_AVFILTER */
1721
1722 static int video_thread(void *arg)
1723 {
1724     VideoState *is = arg;
1725     AVFrame *frame= avcodec_alloc_frame();
1726     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1727     double pts;
1728     int ret;
1729
1730 #if CONFIG_AVFILTER
1731     AVFilterGraph *graph = avfilter_graph_alloc();
1732     AVFilterContext *filt_out = NULL;
1733     int last_w = is->video_st->codec->width;
1734     int last_h = is->video_st->codec->height;
1735
1736     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1737         goto the_end;
1738     filt_out = is->out_video_filter;
1739 #endif
1740
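    /* main decoding loop: pull frames (through the filter graph when avfilter is
       enabled), rescale their PTS to the stream time base and queue them for display */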
1741     for(;;) {
1742 #if !CONFIG_AVFILTER
1743         AVPacket pkt;
1744 #else
1745         AVFilterBufferRef *picref;
1746         AVRational tb = filt_out->inputs[0]->time_base;
1747 #endif
1748         while (is->paused && !is->videoq.abort_request)
1749             SDL_Delay(10);
1750 #if CONFIG_AVFILTER
1751         if (   last_w != is->video_st->codec->width
1752             || last_h != is->video_st->codec->height) {
1753             av_dlog(NULL, "Changing size %dx%d -> %dx%d\n", last_w, last_h,
1754                     is->video_st->codec->width, is->video_st->codec->height);
1755             avfilter_graph_free(&graph);
1756             graph = avfilter_graph_alloc();
1757             if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1758                 goto the_end;
1759             filt_out = is->out_video_filter;
1760             last_w = is->video_st->codec->width;
1761             last_h = is->video_st->codec->height;
1762         }
1763         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1764         if (picref) {
1765             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1766             pts_int = picref->pts;
1767             pos     = picref->pos;
1768             frame->opaque = picref;
1769         }
1770
1771         if (av_cmp_q(tb, is->video_st->time_base)) {
1772             av_unused int64_t pts1 = pts_int;
1773             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1774             av_dlog(NULL, "video_thread(): "
1775                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1776                     tb.num, tb.den, pts1,
1777                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1778         }
1779 #else
1780         ret = get_video_frame(is, frame, &pts_int, &pkt);
1781         pos = pkt.pos;
1782         av_free_packet(&pkt);
1783 #endif
1784
1785         if (ret < 0) goto the_end;
1786
1787 #if CONFIG_AVFILTER
1788         if (!picref)
1789             continue;
1790 #endif
1791
1792         pts = pts_int*av_q2d(is->video_st->time_base);
1793
1794         ret = queue_picture(is, frame, pts, pos);
1795
1796         if (ret < 0)
1797             goto the_end;
1798
1799         if (step)
1800             if (cur_stream)
1801                 stream_toggle_pause(cur_stream);
1802     }
1803  the_end:
1804 #if CONFIG_AVFILTER
1805     avfilter_graph_free(&graph);
1806 #endif
1807     av_free(frame);
1808     return 0;
1809 }
1810
1811 static int subtitle_thread(void *arg)
1812 {
1813     VideoState *is = arg;
1814     SubPicture *sp;
1815     AVPacket pkt1, *pkt = &pkt1;
1816     int got_subtitle;
1817     double pts;
1818     int i, j;
1819     int r, g, b, y, u, v, a;
1820
1821     for(;;) {
1822         while (is->paused && !is->subtitleq.abort_request) {
1823             SDL_Delay(10);
1824         }
1825         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1826             break;
1827
1828         if(pkt->data == flush_pkt.data){
1829             avcodec_flush_buffers(is->subtitle_st->codec);
1830             continue;
1831         }
1832         SDL_LockMutex(is->subpq_mutex);
1833         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1834                !is->subtitleq.abort_request) {
1835             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1836         }
1837         SDL_UnlockMutex(is->subpq_mutex);
1838
1839         if (is->subtitleq.abort_request)
1840             return 0;
1841
1842         sp = &is->subpq[is->subpq_windex];
1843
1844         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1845            this packet, if any */
1846         pts = 0;
1847         if (pkt->pts != AV_NOPTS_VALUE)
1848             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1849
1850         avcodec_decode_subtitle2(is->subtitle_st->codec, &sp->sub,
1851                                  &got_subtitle, pkt);
1852
1853         if (got_subtitle && sp->sub.format == 0) {
1854             sp->pts = pts;
1855
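            /* convert the subtitle palette from RGBA to YUVA so the rectangles can
               later be blended onto the YUV overlay */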
1856             for (i = 0; i < sp->sub.num_rects; i++)
1857             {
1858                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1859                 {
1860                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1861                     y = RGB_TO_Y_CCIR(r, g, b);
1862                     u = RGB_TO_U_CCIR(r, g, b, 0);
1863                     v = RGB_TO_V_CCIR(r, g, b, 0);
1864                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1865                 }
1866             }
1867
1868             /* now we can update the picture count */
1869             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1870                 is->subpq_windex = 0;
1871             SDL_LockMutex(is->subpq_mutex);
1872             is->subpq_size++;
1873             SDL_UnlockMutex(is->subpq_mutex);
1874         }
1875         av_free_packet(pkt);
1876     }
1877     return 0;
1878 }
1879
1880 /* copy decoded samples into the circular buffer used by the audio wave/RDFT display */
1881 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1882 {
1883     int size, len;
1884
1885     size = samples_size / sizeof(short);
1886     while (size > 0) {
1887         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1888         if (len > size)
1889             len = size;
1890         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1891         samples += len;
1892         is->sample_array_index += len;
1893         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1894             is->sample_array_index = 0;
1895         size -= len;
1896     }
1897 }
1898
1899 /* return the new audio buffer size (samples can be added or removed
1900    to get better sync when video or the external clock is the master) */
1901 static int synchronize_audio(VideoState *is, short *samples,
1902                              int samples_size1, double pts)
1903 {
1904     int n, samples_size;
1905     double ref_clock;
1906
1907     n = 2 * is->audio_st->codec->channels;
1908     samples_size = samples_size1;
1909
1910     /* if not master, then we try to remove or add samples to correct the clock */
1911     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1912          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1913         double diff, avg_diff;
1914         int wanted_size, min_size, max_size, nb_samples;
1915
1916         ref_clock = get_master_clock(is);
1917         diff = get_audio_clock(is) - ref_clock;
1918
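        /* keep an exponentially weighted average of the A-V difference and only
           correct once enough measurements are available and the average exceeds
           the threshold */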
1919         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1920             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1921             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1922                 /* not enough measurements to have a correct estimate */
1923                 is->audio_diff_avg_count++;
1924             } else {
1925                 /* estimate the A-V difference */
1926                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1927
1928                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1929                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1930                     nb_samples = samples_size / n;
1931
1932                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1933                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1934                     if (wanted_size < min_size)
1935                         wanted_size = min_size;
1936                     else if (wanted_size > max_size)
1937                         wanted_size = max_size;
1938
1939                     /* add or remove samples to correct the synchronization */
1940                     if (wanted_size < samples_size) {
1941                         /* remove samples */
1942                         samples_size = wanted_size;
1943                     } else if (wanted_size > samples_size) {
1944                         uint8_t *samples_end, *q;
1945                         int nb;
1946
1947                         /* add samples */
1948                         nb = (wanted_size - samples_size);
1949                         samples_end = (uint8_t *)samples + samples_size - n;
1950                         q = samples_end + n;
1951                         while (nb > 0) {
1952                             memcpy(q, samples_end, n);
1953                             q += n;
1954                             nb -= n;
1955                         }
1956                         samples_size = wanted_size;
1957                     }
1958                 }
1959 #if 0
1960                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1961                        diff, avg_diff, samples_size - samples_size1,
1962                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1963 #endif
1964             }
1965         } else {
1966             /* difference is too large: probably initial PTS errors, so
1967                reset the A-V filter */
1968             is->audio_diff_avg_count = 0;
1969             is->audio_diff_cum = 0;
1970         }
1971     }
1972
1973     return samples_size;
1974 }
1975
1976 /* decode one audio frame and return its uncompressed size */
1977 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1978 {
1979     AVPacket *pkt_temp = &is->audio_pkt_temp;
1980     AVPacket *pkt = &is->audio_pkt;
1981     AVCodecContext *dec= is->audio_st->codec;
1982     int n, len1, data_size;
1983     double pts;
1984
1985     for(;;) {
1986         /* NOTE: the audio packet can contain several frames */
1987         while (pkt_temp->size > 0) {
1988             data_size = sizeof(is->audio_buf1);
1989             len1 = avcodec_decode_audio3(dec,
1990                                         (int16_t *)is->audio_buf1, &data_size,
1991                                         pkt_temp);
1992             if (len1 < 0) {
1993                 /* if error, we skip the frame */
1994                 pkt_temp->size = 0;
1995                 break;
1996             }
1997
1998             pkt_temp->data += len1;
1999             pkt_temp->size -= len1;
2000             if (data_size <= 0)
2001                 continue;
2002
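            /* SDL is fed signed 16-bit samples, so set up a converter whenever the
               decoder outputs a different sample format */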
2003             if (dec->sample_fmt != is->audio_src_fmt) {
2004                 if (is->reformat_ctx)
2005                     av_audio_convert_free(is->reformat_ctx);
2006                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2007                                                          dec->sample_fmt, 1, NULL, 0);
2008                 if (!is->reformat_ctx) {
2009                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2010                         av_get_sample_fmt_name(dec->sample_fmt),
2011                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2012                         break;
2013                 }
2014                 is->audio_src_fmt= dec->sample_fmt;
2015             }
2016
2017             if (is->reformat_ctx) {
2018                 const void *ibuf[6]= {is->audio_buf1};
2019                 void *obuf[6]= {is->audio_buf2};
2020                 int istride[6]= {av_get_bytes_per_sample(dec->sample_fmt)};
2021                 int ostride[6]= {2};
2022                 int len= data_size/istride[0];
2023                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2024                     printf("av_audio_convert() failed\n");
2025                     break;
2026                 }
2027                 is->audio_buf= is->audio_buf2;
2028                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2029                           remove this legacy cruft */
2030                 data_size= len*2;
2031             }else{
2032                 is->audio_buf= is->audio_buf1;
2033             }
2034
2035             /* if no pts, then compute it */
2036             pts = is->audio_clock;
2037             *pts_ptr = pts;
2038             n = 2 * dec->channels;
2039             is->audio_clock += (double)data_size /
2040                 (double)(n * dec->sample_rate);
2041 #ifdef DEBUG
2042             {
2043                 static double last_clock;
2044                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2045                        is->audio_clock - last_clock,
2046                        is->audio_clock, pts);
2047                 last_clock = is->audio_clock;
2048             }
2049 #endif
2050             return data_size;
2051         }
2052
2053         /* free the current packet */
2054         if (pkt->data)
2055             av_free_packet(pkt);
2056
2057         if (is->paused || is->audioq.abort_request) {
2058             return -1;
2059         }
2060
2061         /* read next packet */
2062         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2063             return -1;
2064         if(pkt->data == flush_pkt.data){
2065             avcodec_flush_buffers(dec);
2066             continue;
2067         }
2068
2069         pkt_temp->data = pkt->data;
2070         pkt_temp->size = pkt->size;
2071
2072         /* update the audio clock with the packet pts, if available */
2073         if (pkt->pts != AV_NOPTS_VALUE) {
2074             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2075         }
2076     }
2077 }
2078
2079 /* prepare a new audio buffer */
2080 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2081 {
2082     VideoState *is = opaque;
2083     int audio_size, len1;
2084     int bytes_per_sec;
2085     double pts;
2086
2087     audio_callback_time = av_gettime();
2088
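    /* keep decoding until SDL's buffer is filled; on decode errors a short block of
       silence is output so the callback timing is preserved */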
2089     while (len > 0) {
2090         if (is->audio_buf_index >= is->audio_buf_size) {
2091            audio_size = audio_decode_frame(is, &pts);
2092            if (audio_size < 0) {
2093                 /* if error, just output silence */
2094                is->audio_buf = is->audio_buf1;
2095                is->audio_buf_size = 1024;
2096                memset(is->audio_buf, 0, is->audio_buf_size);
2097            } else {
2098                if (is->show_mode != SHOW_MODE_VIDEO)
2099                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2100                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2101                                               pts);
2102                is->audio_buf_size = audio_size;
2103            }
2104            is->audio_buf_index = 0;
2105         }
2106         len1 = is->audio_buf_size - is->audio_buf_index;
2107         if (len1 > len)
2108             len1 = len;
2109         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2110         len -= len1;
2111         stream += len1;
2112         is->audio_buf_index += len1;
2113     }
2114     bytes_per_sec = is->audio_st->codec->sample_rate *
2115             2 * is->audio_st->codec->channels;
2116     is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2117     /* Let's assume the audio driver that is used by SDL has two periods. */
2118     is->audio_current_pts = is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / bytes_per_sec;
2119     is->audio_current_pts_drift = is->audio_current_pts - audio_callback_time / 1000000.0;
2120 }
2121
2122 /* open a given stream. Return 0 if OK */
2123 static int stream_component_open(VideoState *is, int stream_index)
2124 {
2125     AVFormatContext *ic = is->ic;
2126     AVCodecContext *avctx;
2127     AVCodec *codec;
2128     SDL_AudioSpec wanted_spec, spec;
2129     AVDictionary *opts;
2130     AVDictionaryEntry *t = NULL;
2131
2132     if (stream_index < 0 || stream_index >= ic->nb_streams)
2133         return -1;
2134     avctx = ic->streams[stream_index]->codec;
2135
2136     opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index]);
2137
2138     /* request at most two decoded channels for the audio output */
2139     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2140         if (avctx->channels > 0) {
2141             avctx->request_channels = FFMIN(2, avctx->channels);
2142         } else {
2143             avctx->request_channels = 2;
2144         }
2145     }
2146
2147     codec = avcodec_find_decoder(avctx->codec_id);
2148     if (!codec)
2149         return -1;
2150
2151     avctx->workaround_bugs = workaround_bugs;
2152     avctx->lowres = lowres;
2153     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2154     avctx->idct_algo= idct;
2155     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2156     avctx->skip_frame= skip_frame;
2157     avctx->skip_idct= skip_idct;
2158     avctx->skip_loop_filter= skip_loop_filter;
2159     avctx->error_recognition= error_recognition;
2160     avctx->error_concealment= error_concealment;
2161     avctx->thread_count= thread_count;
2162
2163     if(codec->capabilities & CODEC_CAP_DR1)
2164         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2165
2166     if (!codec ||
2167         avcodec_open2(avctx, codec, &opts) < 0)
2168         return -1;
2169     if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2170         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2171         return AVERROR_OPTION_NOT_FOUND;
2172     }
2173
2174     /* prepare audio output */
2175     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2176         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2177             fprintf(stderr, "Invalid sample rate or channel count\n");
2178             return -1;
2179         }
2180         wanted_spec.freq = avctx->sample_rate;
2181         wanted_spec.format = AUDIO_S16SYS;
2182         wanted_spec.channels = avctx->channels;
2183         wanted_spec.silence = 0;
2184         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2185         wanted_spec.callback = sdl_audio_callback;
2186         wanted_spec.userdata = is;
2187         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2188             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2189             return -1;
2190         }
2191         is->audio_hw_buf_size = spec.size;
2192         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2193     }
2194
2195     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2196     switch(avctx->codec_type) {
2197     case AVMEDIA_TYPE_AUDIO:
2198         is->audio_stream = stream_index;
2199         is->audio_st = ic->streams[stream_index];
2200         is->audio_buf_size = 0;
2201         is->audio_buf_index = 0;
2202
2203         /* init averaging filter */
2204         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2205         is->audio_diff_avg_count = 0;
2206         /* since we do not have a precise enough audio fifo fullness,
2207            we correct audio sync only if the error is larger than this threshold */
2208         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2209
2210         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2211         packet_queue_init(&is->audioq);
2212         SDL_PauseAudio(0);
2213         break;
2214     case AVMEDIA_TYPE_VIDEO:
2215         is->video_stream = stream_index;
2216         is->video_st = ic->streams[stream_index];
2217
2218         packet_queue_init(&is->videoq);
2219         is->video_tid = SDL_CreateThread(video_thread, is);
2220         break;
2221     case AVMEDIA_TYPE_SUBTITLE:
2222         is->subtitle_stream = stream_index;
2223         is->subtitle_st = ic->streams[stream_index];
2224         packet_queue_init(&is->subtitleq);
2225
2226         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2227         break;
2228     default:
2229         break;
2230     }
2231     return 0;
2232 }
2233
2234 static void stream_component_close(VideoState *is, int stream_index)
2235 {
2236     AVFormatContext *ic = is->ic;
2237     AVCodecContext *avctx;
2238
2239     if (stream_index < 0 || stream_index >= ic->nb_streams)
2240         return;
2241     avctx = ic->streams[stream_index]->codec;
2242
2243     switch(avctx->codec_type) {
2244     case AVMEDIA_TYPE_AUDIO:
2245         packet_queue_abort(&is->audioq);
2246
2247         SDL_CloseAudio();
2248
2249         packet_queue_end(&is->audioq);
2250         if (is->reformat_ctx)
2251             av_audio_convert_free(is->reformat_ctx);
2252         is->reformat_ctx = NULL;
2253         break;
2254     case AVMEDIA_TYPE_VIDEO:
2255         packet_queue_abort(&is->videoq);
2256
2257         /* note: we also signal this condition to make sure we unblock the
2258            video thread in all cases */
2259         SDL_LockMutex(is->pictq_mutex);
2260         SDL_CondSignal(is->pictq_cond);
2261         SDL_UnlockMutex(is->pictq_mutex);
2262
2263         SDL_WaitThread(is->video_tid, NULL);
2264
2265         packet_queue_end(&is->videoq);
2266         break;
2267     case AVMEDIA_TYPE_SUBTITLE:
2268         packet_queue_abort(&is->subtitleq);
2269
2270         /* note: we also signal this condition to make sure we unblock the
2271            subtitle thread in all cases */
2272         SDL_LockMutex(is->subpq_mutex);
2273         is->subtitle_stream_changed = 1;
2274
2275         SDL_CondSignal(is->subpq_cond);
2276         SDL_UnlockMutex(is->subpq_mutex);
2277
2278         SDL_WaitThread(is->subtitle_tid, NULL);
2279
2280         packet_queue_end(&is->subtitleq);
2281         break;
2282     default:
2283         break;
2284     }
2285
2286     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2287     avcodec_close(avctx);
2288     switch(avctx->codec_type) {
2289     case AVMEDIA_TYPE_AUDIO:
2290         is->audio_st = NULL;
2291         is->audio_stream = -1;
2292         break;
2293     case AVMEDIA_TYPE_VIDEO:
2294         is->video_st = NULL;
2295         is->video_stream = -1;
2296         break;
2297     case AVMEDIA_TYPE_SUBTITLE:
2298         is->subtitle_st = NULL;
2299         is->subtitle_stream = -1;
2300         break;
2301     default:
2302         break;
2303     }
2304 }
2305
2306 /* since we have only one decoding thread, we can use a global
2307    variable instead of a thread local variable */
2308 static VideoState *global_video_state;
2309
2310 static int decode_interrupt_cb(void)
2311 {
2312     return (global_video_state && global_video_state->abort_request);
2313 }
2314
2315 /* this thread gets the stream from the disk or the network */
2316 static int read_thread(void *arg)
2317 {
2318     VideoState *is = arg;
2319     AVFormatContext *ic = NULL;
2320     int err, i, ret;
2321     int st_index[AVMEDIA_TYPE_NB];
2322     AVPacket pkt1, *pkt = &pkt1;
2323     int eof=0;
2324     int pkt_in_play_range = 0;
2325     AVDictionaryEntry *t;
2326     AVDictionary **opts;
2327     int orig_nb_streams;
2328
2329     memset(st_index, -1, sizeof(st_index));
2330     is->video_stream = -1;
2331     is->audio_stream = -1;
2332     is->subtitle_stream = -1;
2333
2334     global_video_state = is;
2335     avio_set_interrupt_cb(decode_interrupt_cb);
2336
2337     err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2338     if (err < 0) {
2339         print_error(is->filename, err);
2340         ret = -1;
2341         goto fail;
2342     }
2343     if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2344         av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2345         ret = AVERROR_OPTION_NOT_FOUND;
2346         goto fail;
2347     }
2348     is->ic = ic;
2349
2350     if(genpts)
2351         ic->flags |= AVFMT_FLAG_GENPTS;
2352
2353     opts = setup_find_stream_info_opts(ic, codec_opts);
2354     orig_nb_streams = ic->nb_streams;
2355
2356     err = avformat_find_stream_info(ic, opts);
2357     if (err < 0) {
2358         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2359         ret = -1;
2360         goto fail;
2361     }
2362     for (i = 0; i < orig_nb_streams; i++)
2363         av_dict_free(&opts[i]);
2364     av_freep(&opts);
2365
2366     if(ic->pb)
2367         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2368
2369     if(seek_by_bytes<0)
2370         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2371
2372     /* if a start time was requested, seek to it */
2373     if (start_time != AV_NOPTS_VALUE) {
2374         int64_t timestamp;
2375
2376         timestamp = start_time;
2377         /* add the stream start time */
2378         if (ic->start_time != AV_NOPTS_VALUE)
2379             timestamp += ic->start_time;
2380         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2381         if (ret < 0) {
2382             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2383                     is->filename, (double)timestamp / AV_TIME_BASE);
2384         }
2385     }
2386
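    /* select the streams to play, honouring -ast/-vst/-sst; the audio and subtitle
       choices are made relative to the selected video stream */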
2387     for (i = 0; i < ic->nb_streams; i++)
2388         ic->streams[i]->discard = AVDISCARD_ALL;
2389     if (!video_disable)
2390         st_index[AVMEDIA_TYPE_VIDEO] =
2391             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2392                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2393     if (!audio_disable)
2394         st_index[AVMEDIA_TYPE_AUDIO] =
2395             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2396                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2397                                 st_index[AVMEDIA_TYPE_VIDEO],
2398                                 NULL, 0);
2399     if (!video_disable)
2400         st_index[AVMEDIA_TYPE_SUBTITLE] =
2401             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2402                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2403                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2404                                  st_index[AVMEDIA_TYPE_AUDIO] :
2405                                  st_index[AVMEDIA_TYPE_VIDEO]),
2406                                 NULL, 0);
2407     if (show_status) {
2408         av_dump_format(ic, 0, is->filename, 0);
2409     }
2410
2411     is->show_mode = show_mode;
2412
2413     /* open the streams */
2414     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2415         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2416     }
2417
2418     ret=-1;
2419     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2420         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2421     }
2422     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2423     if (is->show_mode == SHOW_MODE_NONE)
2424         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2425
2426     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2427         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2428     }
2429
2430     if (is->video_stream < 0 && is->audio_stream < 0) {
2431         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2432         ret = -1;
2433         goto fail;
2434     }
2435
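    /* main demuxing loop: handle pause and seek requests, throttle reading while the
       queues are full, and dispatch each packet to the matching stream queue */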
2436     for(;;) {
2437         if (is->abort_request)
2438             break;
2439         if (is->paused != is->last_paused) {
2440             is->last_paused = is->paused;
2441             if (is->paused)
2442                 is->read_pause_return= av_read_pause(ic);
2443             else
2444                 av_read_play(ic);
2445         }
2446 #if CONFIG_RTSP_DEMUXER
2447         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2448             /* wait 10 ms to avoid trying to get another packet */
2449             /* XXX: horrible */
2450             SDL_Delay(10);
2451             continue;
2452         }
2453 #endif
2454         if (is->seek_req) {
2455             int64_t seek_target= is->seek_pos;
2456             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2457             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2458 //FIXME the +-2 is due to rounding being not done in the correct direction in generation
2459 //      of the seek_pos/seek_rel variables
2460
2461             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2462             if (ret < 0) {
2463                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2464             }else{
2465                 if (is->audio_stream >= 0) {
2466                     packet_queue_flush(&is->audioq);
2467                     packet_queue_put(&is->audioq, &flush_pkt);
2468                 }
2469                 if (is->subtitle_stream >= 0) {
2470                     packet_queue_flush(&is->subtitleq);
2471                     packet_queue_put(&is->subtitleq, &flush_pkt);
2472                 }
2473                 if (is->video_stream >= 0) {
2474                     packet_queue_flush(&is->videoq);
2475                     packet_queue_put(&is->videoq, &flush_pkt);
2476                 }
2477             }
2478             is->seek_req = 0;
2479             eof= 0;
2480         }
2481
2482         /* if the queues are full, no need to read more */
2483         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2484             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2485                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2486                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2487             /* wait 10 ms */
2488             SDL_Delay(10);
2489             continue;
2490         }
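        /* at EOF, queue an empty packet so the video decoder flushes its delayed
           frames, then loop or exit as requested */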
2491         if(eof) {
2492             if(is->video_stream >= 0){
2493                 av_init_packet(pkt);
2494                 pkt->data=NULL;
2495                 pkt->size=0;
2496                 pkt->stream_index= is->video_stream;
2497                 packet_queue_put(&is->videoq, pkt);
2498             }
2499             SDL_Delay(10);
2500             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2501                 if(loop!=1 && (!loop || --loop)){
2502                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2503                 }else if(autoexit){
2504                     ret=AVERROR_EOF;
2505                     goto fail;
2506                 }
2507             }
2508             eof=0;
2509             continue;
2510         }
2511         ret = av_read_frame(ic, pkt);
2512         if (ret < 0) {
2513             if (ret == AVERROR_EOF || url_feof(ic->pb))
2514                 eof=1;
2515             if (ic->pb && ic->pb->error)
2516                 break;
2517             SDL_Delay(100); /* wait for user event */
2518             continue;
2519         }
2520         /* check if packet is in play range specified by user, then queue, otherwise discard */
2521         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2522                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2523                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2524                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2525                 <= ((double)duration/1000000);
2526         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2527             packet_queue_put(&is->audioq, pkt);
2528         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2529             packet_queue_put(&is->videoq, pkt);
2530         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2531             packet_queue_put(&is->subtitleq, pkt);
2532         } else {
2533             av_free_packet(pkt);
2534         }
2535     }
2536     /* wait until the end */
2537     while (!is->abort_request) {
2538         SDL_Delay(100);
2539     }
2540
2541     ret = 0;
2542  fail:
2543     /* disable interrupting */
2544     global_video_state = NULL;
2545
2546     /* close each stream */
2547     if (is->audio_stream >= 0)
2548         stream_component_close(is, is->audio_stream);
2549     if (is->video_stream >= 0)
2550         stream_component_close(is, is->video_stream);
2551     if (is->subtitle_stream >= 0)
2552         stream_component_close(is, is->subtitle_stream);
2553     if (is->ic) {
2554         av_close_input_file(is->ic);
2555         is->ic = NULL; /* safety */
2556     }
2557     avio_set_interrupt_cb(NULL);
2558
2559     if (ret != 0) {
2560         SDL_Event event;
2561
2562         event.type = FF_QUIT_EVENT;
2563         event.user.data1 = is;
2564         SDL_PushEvent(&event);
2565     }
2566     return 0;
2567 }
2568
2569 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2570 {
2571     VideoState *is;
2572
2573     is = av_mallocz(sizeof(VideoState));
2574     if (!is)
2575         return NULL;
2576     av_strlcpy(is->filename, filename, sizeof(is->filename));
2577     is->iformat = iformat;
2578     is->ytop = 0;
2579     is->xleft = 0;
2580
2581     /* start video display */
2582     is->pictq_mutex = SDL_CreateMutex();
2583     is->pictq_cond = SDL_CreateCond();
2584
2585     is->subpq_mutex = SDL_CreateMutex();
2586     is->subpq_cond = SDL_CreateCond();
2587
2588     is->av_sync_type = av_sync_type;
2589     is->read_tid = SDL_CreateThread(read_thread, is);
2590     if (!is->read_tid) {
2591         av_free(is);
2592         return NULL;
2593     }
2594     return is;
2595 }
2596
2597 static void stream_cycle_channel(VideoState *is, int codec_type)
2598 {
2599     AVFormatContext *ic = is->ic;
2600     int start_index, stream_index;
2601     AVStream *st;
2602
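    /* walk the streams circularly, starting after the current one, until another
       usable stream of the requested type is found */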
2603     if (codec_type == AVMEDIA_TYPE_VIDEO)
2604         start_index = is->video_stream;
2605     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2606         start_index = is->audio_stream;
2607     else
2608         start_index = is->subtitle_stream;
2609     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2610         return;
2611     stream_index = start_index;
2612     for(;;) {
2613         if (++stream_index >= is->ic->nb_streams)
2614         {
2615             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2616             {
2617                 stream_index = -1;
2618                 goto the_end;
2619             } else
2620                 stream_index = 0;
2621         }
2622         if (stream_index == start_index)
2623             return;
2624         st = ic->streams[stream_index];
2625         if (st->codec->codec_type == codec_type) {
2626             /* check that parameters are OK */
2627             switch(codec_type) {
2628             case AVMEDIA_TYPE_AUDIO:
2629                 if (st->codec->sample_rate != 0 &&
2630                     st->codec->channels != 0)
2631                     goto the_end;
2632                 break;
2633             case AVMEDIA_TYPE_VIDEO:
2634             case AVMEDIA_TYPE_SUBTITLE:
2635                 goto the_end;
2636             default:
2637                 break;
2638             }
2639         }
2640     }
2641  the_end:
2642     stream_component_close(is, start_index);
2643     stream_component_open(is, stream_index);
2644 }
2645
2646
2647 static void toggle_full_screen(void)
2648 {
2649     is_full_screen = !is_full_screen;
2650     video_open(cur_stream);
2651 }
2652
2653 static void toggle_pause(void)
2654 {
2655     if (cur_stream)
2656         stream_toggle_pause(cur_stream);
2657     step = 0;
2658 }
2659
2660 static void step_to_next_frame(void)
2661 {
2662     if (cur_stream) {
2663         /* if the stream is paused, unpause it, then step */
2664         if (cur_stream->paused)
2665             stream_toggle_pause(cur_stream);
2666     }
2667     step = 1;
2668 }
2669
2670 static void toggle_audio_display(void)
2671 {
2672     if (cur_stream) {
2673         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2674         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2675         fill_rectangle(screen,
2676                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2677                     bgcolor);
2678         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2679     }
2680 }
2681
2682 /* handle an event sent by the GUI */
2683 static void event_loop(void)
2684 {
2685     SDL_Event event;
2686     double incr, pos, frac;
2687
2688     for(;;) {
2689         double x;
2690         SDL_WaitEvent(&event);
2691         switch(event.type) {
2692         case SDL_KEYDOWN:
2693             if (exit_on_keydown) {
2694                 do_exit();
2695                 break;
2696             }
2697             switch(event.key.keysym.sym) {
2698             case SDLK_ESCAPE:
2699             case SDLK_q:
2700                 do_exit();
2701                 break;
2702             case SDLK_f:
2703                 toggle_full_screen();
2704                 break;
2705             case SDLK_p:
2706             case SDLK_SPACE:
2707                 toggle_pause();
2708                 break;
2709             case SDLK_s: //S: Step to next frame
2710                 step_to_next_frame();
2711                 break;
2712             case SDLK_a:
2713                 if (cur_stream)
2714                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2715                 break;
2716             case SDLK_v:
2717                 if (cur_stream)
2718                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2719                 break;
2720             case SDLK_t:
2721                 if (cur_stream)
2722                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2723                 break;
2724             case SDLK_w:
2725                 toggle_audio_display();
2726                 break;
2727             case SDLK_LEFT:
2728                 incr = -10.0;
2729                 goto do_seek;
2730             case SDLK_RIGHT:
2731                 incr = 10.0;
2732                 goto do_seek;
2733             case SDLK_UP:
2734                 incr = 60.0;
2735                 goto do_seek;
2736             case SDLK_DOWN:
2737                 incr = -60.0;
2738             do_seek:
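                /* relative seek: by an estimated byte offset when seeking by bytes,
                   otherwise by time relative to the master clock */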
2739                 if (cur_stream) {
2740                     if (seek_by_bytes) {
2741                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2742                             pos= cur_stream->video_current_pos;
2743                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2744                             pos= cur_stream->audio_pkt.pos;
2745                         }else
2746                             pos = avio_tell(cur_stream->ic->pb);
2747                         if (cur_stream->ic->bit_rate)
2748                             incr *= cur_stream->ic->bit_rate / 8.0;
2749                         else
2750                             incr *= 180000.0;
2751                         pos += incr;
2752                         stream_seek(cur_stream, pos, incr, 1);
2753                     } else {
2754                         pos = get_master_clock(cur_stream);
2755                         pos += incr;
2756                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2757                     }
2758                 }
2759                 break;
2760             default:
2761                 break;
2762             }
2763             break;
2764         case SDL_MOUSEBUTTONDOWN:
2765             if (exit_on_mousedown) {
2766                 do_exit();
2767                 break;
2768             }
2769         case SDL_MOUSEMOTION:
2770             if(event.type ==SDL_MOUSEBUTTONDOWN){
2771                 x= event.button.x;
2772             }else{
2773                 if(event.motion.state != SDL_PRESSED)
2774                     break;
2775                 x= event.motion.x;
2776             }
2777             if (cur_stream) {
2778                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2779                     uint64_t size=  avio_size(cur_stream->ic->pb);
2780                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2781                 }else{
2782                     int64_t ts;
2783                     int ns, hh, mm, ss;
2784                     int tns, thh, tmm, tss;
2785                     tns = cur_stream->ic->duration/1000000LL;
2786                     thh = tns/3600;
2787                     tmm = (tns%3600)/60;
2788                     tss = (tns%60);
2789                     frac = x/cur_stream->width;
2790                     ns = frac*tns;
2791                     hh = ns/3600;
2792                     mm = (ns%3600)/60;
2793                     ss = (ns%60);
2794                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2795                             hh, mm, ss, thh, tmm, tss);
2796                     ts = frac*cur_stream->ic->duration;
2797                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2798                         ts += cur_stream->ic->start_time;
2799                     stream_seek(cur_stream, ts, 0, 0);
2800                 }
2801             }
2802             break;
2803         case SDL_VIDEORESIZE:
2804             if (cur_stream) {
2805                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2806                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2807                 screen_width = cur_stream->width = event.resize.w;
2808                 screen_height= cur_stream->height= event.resize.h;
2809             }
2810             break;
2811         case SDL_QUIT:
2812         case FF_QUIT_EVENT:
2813             do_exit();
2814             break;
2815         case FF_ALLOC_EVENT:
2816             video_open(event.user.data1);
2817             alloc_picture(event.user.data1);
2818             break;
2819         case FF_REFRESH_EVENT:
2820             video_refresh(event.user.data1);
2821             cur_stream->refresh=0;
2822             break;
2823         default:
2824             break;
2825         }
2826     }
2827 }
2828
2829 static int opt_frame_size(const char *opt, const char *arg)
2830 {
2831     av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
2832     return opt_default("video_size", arg);
2833 }
2834
2835 static int opt_width(const char *opt, const char *arg)
2836 {
2837     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2838     return 0;
2839 }
2840
2841 static int opt_height(const char *opt, const char *arg)
2842 {
2843     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2844     return 0;
2845 }
2846
2847 static int opt_format(const char *opt, const char *arg)
2848 {
2849     file_iformat = av_find_input_format(arg);
2850     if (!file_iformat) {
2851         fprintf(stderr, "Unknown input format: %s\n", arg);
2852         return AVERROR(EINVAL);
2853     }
2854     return 0;
2855 }
2856
2857 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2858 {
2859     av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
2860     return opt_default("pixel_format", arg);
2861 }
2862
2863 static int opt_sync(const char *opt, const char *arg)
2864 {
2865     if (!strcmp(arg, "audio"))
2866         av_sync_type = AV_SYNC_AUDIO_MASTER;
2867     else if (!strcmp(arg, "video"))
2868         av_sync_type = AV_SYNC_VIDEO_MASTER;
2869     else if (!strcmp(arg, "ext"))
2870         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2871     else {
2872         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2873         exit(1);
2874     }
2875     return 0;
2876 }
2877
2878 static int opt_seek(const char *opt, const char *arg)
2879 {
2880     start_time = parse_time_or_die(opt, arg, 1);
2881     return 0;
2882 }
2883
2884 static int opt_duration(const char *opt, const char *arg)
2885 {
2886     duration = parse_time_or_die(opt, arg, 1);
2887     return 0;
2888 }
2889
2890 static int opt_thread_count(const char *opt, const char *arg)
2891 {
2892     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2893 #if !HAVE_THREADS
2894     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2895 #endif
2896     return 0;
2897 }
2898
2899 static int opt_show_mode(const char *opt, const char *arg)
2900 {
2901     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2902                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2903                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2904                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2905     return 0;
2906 }
2907
2908 static int opt_input_file(const char *opt, const char *filename)
2909 {
2910     if (input_filename) {
2911         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2912                 filename, input_filename);
2913         exit(1);
2914     }
2915     if (!strcmp(filename, "-"))
2916         filename = "pipe:";
2917     input_filename = filename;
2918     return 0;
2919 }
2920
2921 static const OptionDef options[] = {
2922 #include "cmdutils_common_opts.h"
2923     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2924     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2925     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2926     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2927     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2928     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2929     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2930     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2931     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2932     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2933     { "t", HAS_ARG, {(void*)&opt_duration}, "play  \"duration\" seconds of audio/video", "duration" },
2934     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2935     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2936     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2937     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2938     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2939     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2940     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2941     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2942     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2943     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2944     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2945     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2946     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2947     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2948     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2949     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2950     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2951     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2952     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2953     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2954     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2955     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2956     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2957     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2958 #if CONFIG_AVFILTER
2959     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2960 #endif
2961     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2962     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2963     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2964     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2965     { NULL, },
2966 };
2967
2968 static void show_usage(void)
2969 {
2970     printf("Simple media player\n");
2971     printf("usage: %s [options] input_file\n", program_name);
2972     printf("\n");
2973 }
2974
2975 static int opt_help(const char *opt, const char *arg)
2976 {
2977     av_log_set_callback(log_callback_help);
2978     show_usage();
2979     show_help_options(options, "Main options:\n",
2980                       OPT_EXPERT, 0);
2981     show_help_options(options, "\nAdvanced options:\n",
2982                       OPT_EXPERT, OPT_EXPERT);
2983     printf("\n");
2984     av_opt_show2(avcodec_opts[0], NULL,
2985                  AV_OPT_FLAG_DECODING_PARAM, 0);
2986     printf("\n");
2987     av_opt_show2(avformat_opts, NULL,
2988                  AV_OPT_FLAG_DECODING_PARAM, 0);
2989 #if !CONFIG_AVFILTER
2990     printf("\n");
2991     av_opt_show2(sws_opts, NULL,
2992                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2993 #endif
2994     printf("\nWhile playing:\n"
2995            "q, ESC              quit\n"
2996            "f                   toggle full screen\n"
2997            "p, SPC              pause\n"
2998            "a                   cycle audio channel\n"
2999            "v                   cycle video channel\n"
3000            "t                   cycle subtitle channel\n"
3001            "w                   show audio waves\n"
3002            "s                   activate frame-step mode\n"
3003            "left/right          seek backward/forward 10 seconds\n"
3004            "down/up             seek backward/forward 1 minute\n"
3005            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3006            );
3007     return 0;
3008 }
3009
3010 /* program entry point */
3011 int main(int argc, char **argv)
3012 {
3013     int flags;
3014
3015     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3016
3017     /* register all codecs, demuxers and protocols */
3018     avcodec_register_all();
3019 #if CONFIG_AVDEVICE
3020     avdevice_register_all();
3021 #endif
3022 #if CONFIG_AVFILTER
3023     avfilter_register_all();
3024 #endif
3025     av_register_all();
3026
3027     init_opts();
3028
3029     show_banner();
3030
3031     parse_options(argc, argv, options, opt_input_file);
3032
3033     if (!input_filename) {
3034         show_usage();
3035         fprintf(stderr, "An input file must be specified\n");
3036         fprintf(stderr, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3037         exit(1);
3038     }
3039
3040     if (display_disable) {
3041         video_disable = 1;
3042     }
3043     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3044     if (audio_disable)
3045         flags &= ~SDL_INIT_AUDIO;
3046 #if !defined(__MINGW32__) && !defined(__APPLE__)
3047     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3048 #endif
3049     if (SDL_Init (flags)) {
3050         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3051         fprintf(stderr, "(Did you set the DISPLAY variable?)\n");
3052         exit(1);
3053     }
3054
3055     if (!display_disable) {
3056 #if HAVE_SDL_VIDEO_SIZE
3057         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3058         fs_screen_width = vi->current_w;
3059         fs_screen_height = vi->current_h;
3060 #endif
3061     }
3062
3063     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3064     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3065     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3066
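    /* the flush packet is a sentinel queued after seeks to make the decoders flush
       their buffers */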
3067     av_init_packet(&flush_pkt);
3068     flush_pkt.data= "FLUSH";
3069
3070     cur_stream = stream_open(input_filename, file_iformat);
3071
3072     event_loop();
3073
3074     /* never returns */
3075
3076     return 0;
3077 }