[ffmpeg.git] / ffplay.c
1 /*
2  * ffplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/colorspace.h"
28 #include "libavutil/pixdesc.h"
29 #include "libavutil/imgutils.h"
30 #include "libavutil/parseutils.h"
31 #include "libavutil/samplefmt.h"
32 #include "libavutil/avassert.h"
33 #include "libavformat/avformat.h"
34 #include "libavdevice/avdevice.h"
35 #include "libswscale/swscale.h"
36 #include "libavcodec/audioconvert.h"
37 #include "libavutil/opt.h"
38 #include "libavcodec/avfft.h"
39
40 #if CONFIG_AVFILTER
41 # include "libavfilter/avcodec.h"
42 # include "libavfilter/avfilter.h"
43 # include "libavfilter/avfiltergraph.h"
44 # include "libavfilter/vsink_buffer.h"
45 #endif
46
47 #include <SDL.h>
48 #include <SDL_thread.h>
49
50 #include "cmdutils.h"
51
52 #include <unistd.h>
53 #include <assert.h>
54
55 const char program_name[] = "ffplay";
56 const int program_birth_year = 2003;
57
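/* limits used by the read thread to bound how much demuxed data is buffered ahead of the decoders */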
58 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
59 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
60 #define MIN_FRAMES 5
61
62 /* SDL audio buffer size, in samples. Should be small to have precise
63    A/V sync as SDL does not have hardware buffer fullness info. */
64 #define SDL_AUDIO_BUFFER_SIZE 1024
65
66 /* no AV sync correction is done if below the AV sync threshold */
67 #define AV_SYNC_THRESHOLD 0.01
68 /* no AV correction is done if too big error */
69 #define AV_NOSYNC_THRESHOLD 10.0
70
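/* step by which the frame-skip estimate grows when video falls behind (and decays when the picture queue fills up) */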
71 #define FRAME_SKIP_FACTOR 0.05
72
73 /* maximum audio speed change to get correct sync */
74 #define SAMPLE_CORRECTION_PERCENT_MAX 10
75
76 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
77 #define AUDIO_DIFF_AVG_NB   20
78
79 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
80 #define SAMPLE_ARRAY_SIZE (2*65536)
81
82 static int sws_flags = SWS_BICUBIC;
83
84 typedef struct PacketQueue {
85     AVPacketList *first_pkt, *last_pkt;
86     int nb_packets;
87     int size;
88     int abort_request;
89     SDL_mutex *mutex;
90     SDL_cond *cond;
91 } PacketQueue;
92
93 #define VIDEO_PICTURE_QUEUE_SIZE 2
94 #define SUBPICTURE_QUEUE_SIZE 4
95
96 typedef struct VideoPicture {
97     double pts;                                  ///<presentation time stamp for this picture
98     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
99     int64_t pos;                                 ///<byte position in file
100     SDL_Overlay *bmp;
101     int width, height; /* source height & width */
102     int allocated;
103     enum PixelFormat pix_fmt;
104
105 #if CONFIG_AVFILTER
106     AVFilterBufferRef *picref;
107 #endif
108 } VideoPicture;
109
110 typedef struct SubPicture {
111     double pts; /* presentation time stamp for this picture */
112     AVSubtitle sub;
113 } SubPicture;
114
115 enum {
116     AV_SYNC_AUDIO_MASTER, /* default choice */
117     AV_SYNC_VIDEO_MASTER,
118     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
119 };
120
121 typedef struct VideoState {
122     SDL_Thread *read_tid;
123     SDL_Thread *video_tid;
124     SDL_Thread *refresh_tid;
125     AVInputFormat *iformat;
126     int no_background;
127     int abort_request;
128     int paused;
129     int last_paused;
130     int seek_req;
131     int seek_flags;
132     int64_t seek_pos;
133     int64_t seek_rel;
134     int read_pause_return;
135     AVFormatContext *ic;
136
137     int audio_stream;
138
139     int av_sync_type;
140     double external_clock; /* external clock base */
141     int64_t external_clock_time;
142
143     double audio_clock;
144     double audio_diff_cum; /* used for AV difference average computation */
145     double audio_diff_avg_coef;
146     double audio_diff_threshold;
147     int audio_diff_avg_count;
148     AVStream *audio_st;
149     PacketQueue audioq;
150     int audio_hw_buf_size;
151     /* samples output by the codec. we reserve more space for avsync
152        compensation */
153     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
154     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
155     uint8_t *audio_buf;
156     unsigned int audio_buf_size; /* in bytes */
157     int audio_buf_index; /* in bytes */
158     AVPacket audio_pkt_temp;
159     AVPacket audio_pkt;
160     enum AVSampleFormat audio_src_fmt;
161     AVAudioConvert *reformat_ctx;
162
163     enum ShowMode {
164         SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
165     } show_mode;
166     int16_t sample_array[SAMPLE_ARRAY_SIZE];
167     int sample_array_index;
168     int last_i_start;
169     RDFTContext *rdft;
170     int rdft_bits;
171     FFTSample *rdft_data;
172     int xpos;
173
174     SDL_Thread *subtitle_tid;
175     int subtitle_stream;
176     int subtitle_stream_changed;
177     AVStream *subtitle_st;
178     PacketQueue subtitleq;
179     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
180     int subpq_size, subpq_rindex, subpq_windex;
181     SDL_mutex *subpq_mutex;
182     SDL_cond *subpq_cond;
183
184     double frame_timer;
185     double frame_last_pts;
186     double frame_last_delay;
187     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
188     int video_stream;
189     AVStream *video_st;
190     PacketQueue videoq;
191     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
192     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
193     int64_t video_current_pos;                   ///<current displayed file pos
194     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
195     int pictq_size, pictq_rindex, pictq_windex;
196     SDL_mutex *pictq_mutex;
197     SDL_cond *pictq_cond;
198 #if !CONFIG_AVFILTER
199     struct SwsContext *img_convert_ctx;
200 #endif
201
202     char filename[1024];
203     int width, height, xleft, ytop;
204
205 #if CONFIG_AVFILTER
206     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
207 #endif
208
209     float skip_frames;
210     float skip_frames_index;
211     int refresh;
212 } VideoState;
213
214 static void show_help(void);
215
216 /* options specified by the user */
217 static AVInputFormat *file_iformat;
218 static const char *input_filename;
219 static const char *window_title;
220 static int fs_screen_width;
221 static int fs_screen_height;
222 static int screen_width = 0;
223 static int screen_height = 0;
224 static int frame_width = 0;
225 static int frame_height = 0;
226 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
227 static int audio_disable;
228 static int video_disable;
229 static int wanted_stream[AVMEDIA_TYPE_NB]={
230     [AVMEDIA_TYPE_AUDIO]=-1,
231     [AVMEDIA_TYPE_VIDEO]=-1,
232     [AVMEDIA_TYPE_SUBTITLE]=-1,
233 };
234 static int seek_by_bytes=-1;
235 static int display_disable;
236 static int show_status = 1;
237 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
238 static int64_t start_time = AV_NOPTS_VALUE;
239 static int64_t duration = AV_NOPTS_VALUE;
240 static int step = 0;
241 static int thread_count = 1;
242 static int workaround_bugs = 1;
243 static int fast = 0;
244 static int genpts = 0;
245 static int lowres = 0;
246 static int idct = FF_IDCT_AUTO;
247 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
248 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
249 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
250 static int error_recognition = FF_ER_CAREFUL;
251 static int error_concealment = 3;
252 static int decoder_reorder_pts= -1;
253 static int autoexit;
254 static int exit_on_keydown;
255 static int exit_on_mousedown;
256 static int loop=1;
257 static int framedrop=1;
258 static enum ShowMode show_mode = SHOW_MODE_NONE;
259
260 static int rdftspeed=20;
261 #if CONFIG_AVFILTER
262 static char *vfilters = NULL;
263 #endif
264
265 /* current context */
266 static int is_full_screen;
267 static VideoState *cur_stream;
268 static int64_t audio_callback_time;
269
270 static AVPacket flush_pkt;
271
272 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
273 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
274 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
275
276 static SDL_Surface *screen;
277
278 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
279 {
280     AVPacketList *pkt1;
281
282     /* duplicate the packet */
283     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
284         return -1;
285
286     pkt1 = av_malloc(sizeof(AVPacketList));
287     if (!pkt1)
288         return -1;
289     pkt1->pkt = *pkt;
290     pkt1->next = NULL;
291
292
293     SDL_LockMutex(q->mutex);
294
295     if (!q->last_pkt)
297         q->first_pkt = pkt1;
298     else
299         q->last_pkt->next = pkt1;
300     q->last_pkt = pkt1;
301     q->nb_packets++;
302     q->size += pkt1->pkt.size + sizeof(*pkt1);
303     /* XXX: should duplicate packet data in DV case */
304     SDL_CondSignal(q->cond);
305
306     SDL_UnlockMutex(q->mutex);
307     return 0;
308 }
309
310 /* packet queue handling */
311 static void packet_queue_init(PacketQueue *q)
312 {
313     memset(q, 0, sizeof(PacketQueue));
314     q->mutex = SDL_CreateMutex();
315     q->cond = SDL_CreateCond();
316     packet_queue_put(q, &flush_pkt);
317 }
318
319 static void packet_queue_flush(PacketQueue *q)
320 {
321     AVPacketList *pkt, *pkt1;
322
323     SDL_LockMutex(q->mutex);
324     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
325         pkt1 = pkt->next;
326         av_free_packet(&pkt->pkt);
327         av_freep(&pkt);
328     }
329     q->last_pkt = NULL;
330     q->first_pkt = NULL;
331     q->nb_packets = 0;
332     q->size = 0;
333     SDL_UnlockMutex(q->mutex);
334 }
335
336 static void packet_queue_end(PacketQueue *q)
337 {
338     packet_queue_flush(q);
339     SDL_DestroyMutex(q->mutex);
340     SDL_DestroyCond(q->cond);
341 }
342
343 static void packet_queue_abort(PacketQueue *q)
344 {
345     SDL_LockMutex(q->mutex);
346
347     q->abort_request = 1;
348
349     SDL_CondSignal(q->cond);
350
351     SDL_UnlockMutex(q->mutex);
352 }
353
354 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
355 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
356 {
357     AVPacketList *pkt1;
358     int ret;
359
360     SDL_LockMutex(q->mutex);
361
362     for(;;) {
363         if (q->abort_request) {
364             ret = -1;
365             break;
366         }
367
368         pkt1 = q->first_pkt;
369         if (pkt1) {
370             q->first_pkt = pkt1->next;
371             if (!q->first_pkt)
372                 q->last_pkt = NULL;
373             q->nb_packets--;
374             q->size -= pkt1->pkt.size + sizeof(*pkt1);
375             *pkt = pkt1->pkt;
376             av_free(pkt1);
377             ret = 1;
378             break;
379         } else if (!block) {
380             ret = 0;
381             break;
382         } else {
383             SDL_CondWait(q->cond, q->mutex);
384         }
385     }
386     SDL_UnlockMutex(q->mutex);
387     return ret;
388 }
389
390 static inline void fill_rectangle(SDL_Surface *screen,
391                                   int x, int y, int w, int h, int color)
392 {
393     SDL_Rect rect;
394     rect.x = x;
395     rect.y = y;
396     rect.w = w;
397     rect.h = h;
398     SDL_FillRect(screen, &rect, color);
399 }
400
401 #define ALPHA_BLEND(a, oldp, newp, s)\
402 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
403
404 #define RGBA_IN(r, g, b, a, s)\
405 {\
406     unsigned int v = ((const uint32_t *)(s))[0];\
407     a = (v >> 24) & 0xff;\
408     r = (v >> 16) & 0xff;\
409     g = (v >> 8) & 0xff;\
410     b = v & 0xff;\
411 }
412
413 #define YUVA_IN(y, u, v, a, s, pal)\
414 {\
415     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
416     a = (val >> 24) & 0xff;\
417     y = (val >> 16) & 0xff;\
418     u = (val >> 8) & 0xff;\
419     v = val & 0xff;\
420 }
421
422 #define YUVA_OUT(d, y, u, v, a)\
423 {\
424     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
425 }
426
427
428 #define BPP 1
429
430 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
431 {
432     int wrap, wrap3, width2, skip2;
433     int y, u, v, a, u1, v1, a1, w, h;
434     uint8_t *lum, *cb, *cr;
435     const uint8_t *p;
436     const uint32_t *pal;
437     int dstx, dsty, dstw, dsth;
438
439     dstw = av_clip(rect->w, 0, imgw);
440     dsth = av_clip(rect->h, 0, imgh);
441     dstx = av_clip(rect->x, 0, imgw - dstw);
442     dsty = av_clip(rect->y, 0, imgh - dsth);
443     lum = dst->data[0] + dsty * dst->linesize[0];
444     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
445     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
446
447     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
448     skip2 = dstx >> 1;
449     wrap = dst->linesize[0];
450     wrap3 = rect->pict.linesize[0];
451     p = rect->pict.data[0];
452     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
453
454     if (dsty & 1) {
455         lum += dstx;
456         cb += skip2;
457         cr += skip2;
458
459         if (dstx & 1) {
460             YUVA_IN(y, u, v, a, p, pal);
461             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
462             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
463             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
464             cb++;
465             cr++;
466             lum++;
467             p += BPP;
468         }
469         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
470             YUVA_IN(y, u, v, a, p, pal);
471             u1 = u;
472             v1 = v;
473             a1 = a;
474             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475
476             YUVA_IN(y, u, v, a, p + BPP, pal);
477             u1 += u;
478             v1 += v;
479             a1 += a;
480             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
481             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
482             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
483             cb++;
484             cr++;
485             p += 2 * BPP;
486             lum += 2;
487         }
488         if (w) {
489             YUVA_IN(y, u, v, a, p, pal);
490             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
492             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
493             p++;
494             lum++;
495         }
496         p += wrap3 - dstw * BPP;
497         lum += wrap - dstw - dstx;
498         cb += dst->linesize[1] - width2 - skip2;
499         cr += dst->linesize[2] - width2 - skip2;
500     }
501     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
502         lum += dstx;
503         cb += skip2;
504         cr += skip2;
505
506         if (dstx & 1) {
507             YUVA_IN(y, u, v, a, p, pal);
508             u1 = u;
509             v1 = v;
510             a1 = a;
511             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
512             p += wrap3;
513             lum += wrap;
514             YUVA_IN(y, u, v, a, p, pal);
515             u1 += u;
516             v1 += v;
517             a1 += a;
518             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
519             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
520             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
521             cb++;
522             cr++;
523             p += -wrap3 + BPP;
524             lum += -wrap + 1;
525         }
526         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
527             YUVA_IN(y, u, v, a, p, pal);
528             u1 = u;
529             v1 = v;
530             a1 = a;
531             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
532
533             YUVA_IN(y, u, v, a, p + BPP, pal);
534             u1 += u;
535             v1 += v;
536             a1 += a;
537             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
538             p += wrap3;
539             lum += wrap;
540
541             YUVA_IN(y, u, v, a, p, pal);
542             u1 += u;
543             v1 += v;
544             a1 += a;
545             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
546
547             YUVA_IN(y, u, v, a, p + BPP, pal);
548             u1 += u;
549             v1 += v;
550             a1 += a;
551             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
552
553             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
554             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
555
556             cb++;
557             cr++;
558             p += -wrap3 + 2 * BPP;
559             lum += -wrap + 2;
560         }
561         if (w) {
562             YUVA_IN(y, u, v, a, p, pal);
563             u1 = u;
564             v1 = v;
565             a1 = a;
566             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
567             p += wrap3;
568             lum += wrap;
569             YUVA_IN(y, u, v, a, p, pal);
570             u1 += u;
571             v1 += v;
572             a1 += a;
573             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
575             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
576             cb++;
577             cr++;
578             p += -wrap3 + BPP;
579             lum += -wrap + 1;
580         }
581         p += wrap3 + (wrap3 - dstw * BPP);
582         lum += wrap + (wrap - dstw - dstx);
583         cb += dst->linesize[1] - width2 - skip2;
584         cr += dst->linesize[2] - width2 - skip2;
585     }
586     /* handle odd height */
587     if (h) {
588         lum += dstx;
589         cb += skip2;
590         cr += skip2;
591
592         if (dstx & 1) {
593             YUVA_IN(y, u, v, a, p, pal);
594             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
595             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
596             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
597             cb++;
598             cr++;
599             lum++;
600             p += BPP;
601         }
602         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
603             YUVA_IN(y, u, v, a, p, pal);
604             u1 = u;
605             v1 = v;
606             a1 = a;
607             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
608
609             YUVA_IN(y, u, v, a, p + BPP, pal);
610             u1 += u;
611             v1 += v;
612             a1 += a;
613             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
614             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
615             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
616             cb++;
617             cr++;
618             p += 2 * BPP;
619             lum += 2;
620         }
621         if (w) {
622             YUVA_IN(y, u, v, a, p, pal);
623             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
624             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
625             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
626         }
627     }
628 }
629
630 static void free_subpicture(SubPicture *sp)
631 {
632     avsubtitle_free(&sp->sub);
633 }
634
635 static void video_image_display(VideoState *is)
636 {
637     VideoPicture *vp;
638     SubPicture *sp;
639     AVPicture pict;
640     float aspect_ratio;
641     int width, height, x, y;
642     SDL_Rect rect;
643     int i;
644
645     vp = &is->pictq[is->pictq_rindex];
646     if (vp->bmp) {
647 #if CONFIG_AVFILTER
648          if (vp->picref->video->sample_aspect_ratio.num == 0)
649              aspect_ratio = 0;
650          else
651              aspect_ratio = av_q2d(vp->picref->video->sample_aspect_ratio);
652 #else
653
654         /* XXX: use variable in the frame */
655         if (is->video_st->sample_aspect_ratio.num)
656             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
657         else if (is->video_st->codec->sample_aspect_ratio.num)
658             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
659         else
660             aspect_ratio = 0;
661 #endif
662         if (aspect_ratio <= 0.0)
663             aspect_ratio = 1.0;
664         aspect_ratio *= (float)vp->width / (float)vp->height;
665
666         if (is->subtitle_st) {
667             if (is->subpq_size > 0) {
668                 sp = &is->subpq[is->subpq_rindex];
669
670                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
671                     SDL_LockYUVOverlay (vp->bmp);
672
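                    /* SDL's YV12 overlay stores the V plane before the U plane, so the chroma planes are swapped when mapped to the AVPicture */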
673                     pict.data[0] = vp->bmp->pixels[0];
674                     pict.data[1] = vp->bmp->pixels[2];
675                     pict.data[2] = vp->bmp->pixels[1];
676
677                     pict.linesize[0] = vp->bmp->pitches[0];
678                     pict.linesize[1] = vp->bmp->pitches[2];
679                     pict.linesize[2] = vp->bmp->pitches[1];
680
681                     for (i = 0; i < sp->sub.num_rects; i++)
682                         blend_subrect(&pict, sp->sub.rects[i],
683                                       vp->bmp->w, vp->bmp->h);
684
685                     SDL_UnlockYUVOverlay (vp->bmp);
686                 }
687             }
688         }
689
690
691         /* XXX: we suppose the screen has a 1.0 pixel ratio */
692         height = is->height;
693         width = ((int)rint(height * aspect_ratio)) & ~1;
694         if (width > is->width) {
695             width = is->width;
696             height = ((int)rint(width / aspect_ratio)) & ~1;
697         }
698         x = (is->width - width) / 2;
699         y = (is->height - height) / 2;
700         is->no_background = 0;
701         rect.x = is->xleft + x;
702         rect.y = is->ytop  + y;
703         rect.w = FFMAX(width,  1);
704         rect.h = FFMAX(height, 1);
705         SDL_DisplayYUVOverlay(vp->bmp, &rect);
706     }
707 }
708
709 /* get the current audio output buffer size, in bytes. With SDL, we
710    cannot get precise information about the hardware buffer state */
711 static int audio_write_get_buf_size(VideoState *is)
712 {
713     return is->audio_buf_size - is->audio_buf_index;
714 }
715
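/* positive modulo: wrap a possibly negative index into [0, b), used for the circular sample_array */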
716 static inline int compute_mod(int a, int b)
717 {
718     return a < 0 ? a%b + b : a%b;
719 }
720
721 static void video_audio_display(VideoState *s)
722 {
723     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
724     int ch, channels, h, h2, bgcolor, fgcolor;
725     int64_t time_diff;
726     int rdft_bits, nb_freq;
727
728     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
729         ;
730     nb_freq= 1<<(rdft_bits-1);
731
732     /* compute display index : center on currently output samples */
733     channels = s->audio_st->codec->channels;
734     nb_display_channels = channels;
735     if (!s->paused) {
736         int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
737         n = 2 * channels;
738         delay = audio_write_get_buf_size(s);
739         delay /= n;
740
741         /* to be more precise, we take into account the time spent since
742            the last buffer computation */
743         if (audio_callback_time) {
744             time_diff = av_gettime() - audio_callback_time;
745             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
746         }
747
748         delay += 2*data_used;
749         if (delay < data_used)
750             delay = data_used;
751
752         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
753         if (s->show_mode == SHOW_MODE_WAVES) {
754             h= INT_MIN;
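            /* among the last 1000 samples, pick a start index at a zero crossing (b and c have opposite signs)
               with the largest drop a - d, so the waveform display stays phase-aligned between refreshes */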
755             for(i=0; i<1000; i+=channels){
756                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
757                 int a= s->sample_array[idx];
758                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
759                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
760                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
761                 int score= a-d;
762                 if(h<score && (b^c)<0){
763                     h= score;
764                     i_start= idx;
765                 }
766             }
767         }
768
769         s->last_i_start = i_start;
770     } else {
771         i_start = s->last_i_start;
772     }
773
774     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
775     if (s->show_mode == SHOW_MODE_WAVES) {
776         fill_rectangle(screen,
777                        s->xleft, s->ytop, s->width, s->height,
778                        bgcolor);
779
780         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
781
782         /* total height for one channel */
783         h = s->height / nb_display_channels;
784         /* graph height / 2 */
785         h2 = (h * 9) / 20;
786         for(ch = 0;ch < nb_display_channels; ch++) {
787             i = i_start + ch;
788             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
789             for(x = 0; x < s->width; x++) {
790                 y = (s->sample_array[i] * h2) >> 15;
791                 if (y < 0) {
792                     y = -y;
793                     ys = y1 - y;
794                 } else {
795                     ys = y1;
796                 }
797                 fill_rectangle(screen,
798                                s->xleft + x, ys, 1, y,
799                                fgcolor);
800                 i += channels;
801                 if (i >= SAMPLE_ARRAY_SIZE)
802                     i -= SAMPLE_ARRAY_SIZE;
803             }
804         }
805
806         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
807
808         for(ch = 1;ch < nb_display_channels; ch++) {
809             y = s->ytop + ch * h;
810             fill_rectangle(screen,
811                            s->xleft, y, s->width, 1,
812                            fgcolor);
813         }
814         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
815     }else{
816         nb_display_channels= FFMIN(nb_display_channels, 2);
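        /* (re)create the RDFT context and data buffer whenever the required transform size changes */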
817         if(rdft_bits != s->rdft_bits){
818             av_rdft_end(s->rdft);
819             av_free(s->rdft_data);
820             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
821             s->rdft_bits= rdft_bits;
822             s->rdft_data= av_malloc(4*nb_freq*sizeof(*s->rdft_data));
823         }
824         {
825             FFTSample *data[2];
826             for(ch = 0;ch < nb_display_channels; ch++) {
827                 data[ch] = s->rdft_data + 2*nb_freq*ch;
828                 i = i_start + ch;
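                /* fill the RDFT input with windowed samples (Welch/parabolic window 1 - w*w) */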
829                 for(x = 0; x < 2*nb_freq; x++) {
830                     double w= (x-nb_freq)*(1.0/nb_freq);
831                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
832                     i += channels;
833                     if (i >= SAMPLE_ARRAY_SIZE)
834                         i -= SAMPLE_ARRAY_SIZE;
835                 }
836                 av_rdft_calc(s->rdft, data[ch]);
837             }
838             // least efficient way to do this; we should of course access the data directly, but it's more than fast enough
839             for(y=0; y<s->height; y++){
840                 double w= 1/sqrt(nb_freq);
841                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
842                 int b= (nb_display_channels == 2 ) ? sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0]
843                        + data[1][2*y+1]*data[1][2*y+1])) : a;
844                 a= FFMIN(a,255);
845                 b= FFMIN(b,255);
846                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
847
848                 fill_rectangle(screen,
849                             s->xpos, s->height-y, 1, 1,
850                             fgcolor);
851             }
852         }
853         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
854         s->xpos++;
855         if(s->xpos >= s->width)
856             s->xpos= s->xleft;
857     }
858 }
859
860 static void stream_close(VideoState *is)
861 {
862     VideoPicture *vp;
863     int i;
864     /* XXX: use a special url_shutdown call to abort parse cleanly */
865     is->abort_request = 1;
866     SDL_WaitThread(is->read_tid, NULL);
867     SDL_WaitThread(is->refresh_tid, NULL);
868
869     /* free all pictures */
870     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
871         vp = &is->pictq[i];
872 #if CONFIG_AVFILTER
873         if (vp->picref) {
874             avfilter_unref_buffer(vp->picref);
875             vp->picref = NULL;
876         }
877 #endif
878         if (vp->bmp) {
879             SDL_FreeYUVOverlay(vp->bmp);
880             vp->bmp = NULL;
881         }
882     }
883     SDL_DestroyMutex(is->pictq_mutex);
884     SDL_DestroyCond(is->pictq_cond);
885     SDL_DestroyMutex(is->subpq_mutex);
886     SDL_DestroyCond(is->subpq_cond);
887 #if !CONFIG_AVFILTER
888     if (is->img_convert_ctx)
889         sws_freeContext(is->img_convert_ctx);
890 #endif
891     av_free(is);
892 }
893
894 static void do_exit(void)
895 {
896     if (cur_stream) {
897         stream_close(cur_stream);
898         cur_stream = NULL;
899     }
900     uninit_opts();
901 #if CONFIG_AVFILTER
902     avfilter_uninit();
903 #endif
904     if (show_status)
905         printf("\n");
906     SDL_Quit();
907     av_log(NULL, AV_LOG_QUIET, "");
908     exit(0);
909 }
910
911 static int video_open(VideoState *is){
912     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
913     int w,h;
914
915     if(is_full_screen) flags |= SDL_FULLSCREEN;
916     else               flags |= SDL_RESIZABLE;
917
918     if (is_full_screen && fs_screen_width) {
919         w = fs_screen_width;
920         h = fs_screen_height;
921     } else if(!is_full_screen && screen_width){
922         w = screen_width;
923         h = screen_height;
924 #if CONFIG_AVFILTER
925     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
926         w = is->out_video_filter->inputs[0]->w;
927         h = is->out_video_filter->inputs[0]->h;
928 #else
929     }else if (is->video_st && is->video_st->codec->width){
930         w = is->video_st->codec->width;
931         h = is->video_st->codec->height;
932 #endif
933     } else {
934         w = 640;
935         h = 480;
936     }
937     if(screen && is->width == screen->w && screen->w == w
938        && is->height== screen->h && screen->h == h)
939         return 0;
940
941 #ifndef __APPLE__
942     screen = SDL_SetVideoMode(w, h, 0, flags);
943 #else
944     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
945     screen = SDL_SetVideoMode(w, h, 24, flags);
946 #endif
947     if (!screen) {
948         fprintf(stderr, "SDL: could not set video mode - exiting\n");
949         do_exit();
950     }
951     if (!window_title)
952         window_title = input_filename;
953     SDL_WM_SetCaption(window_title, window_title);
954
955     is->width = screen->w;
956     is->height = screen->h;
957
958     return 0;
959 }
960
961 /* display the current picture, if any */
962 static void video_display(VideoState *is)
963 {
964     if(!screen)
965         video_open(cur_stream);
966     if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
967         video_audio_display(is);
968     else if (is->video_st)
969         video_image_display(is);
970 }
971
972 static int refresh_thread(void *opaque)
973 {
974     VideoState *is= opaque;
975     while(!is->abort_request){
976         SDL_Event event;
977         event.type = FF_REFRESH_EVENT;
978         event.user.data1 = opaque;
979         if(!is->refresh){
980             is->refresh=1;
981             SDL_PushEvent(&event);
982         }
983         //FIXME ideally we should wait for the correct time, but SDL's event passing is so slow it would be silly
984         usleep(is->audio_st && is->show_mode != SHOW_MODE_VIDEO ? rdftspeed*1000 : 5000);
985     }
986     return 0;
987 }
988
989 /* get the current audio clock value */
990 static double get_audio_clock(VideoState *is)
991 {
992     double pts;
993     int hw_buf_size, bytes_per_sec;
994     pts = is->audio_clock;
995     hw_buf_size = audio_write_get_buf_size(is);
996     bytes_per_sec = 0;
997     if (is->audio_st) {
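        /* the audio path always outputs signed 16-bit samples, hence 2 bytes per sample per channel */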
998         bytes_per_sec = is->audio_st->codec->sample_rate *
999             2 * is->audio_st->codec->channels;
1000     }
1001     if (bytes_per_sec)
1002         pts -= (double)hw_buf_size / bytes_per_sec;
1003     return pts;
1004 }
1005
1006 /* get the current video clock value */
1007 static double get_video_clock(VideoState *is)
1008 {
1009     if (is->paused) {
1010         return is->video_current_pts;
1011     } else {
1012         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1013     }
1014 }
1015
1016 /* get the current external clock value */
1017 static double get_external_clock(VideoState *is)
1018 {
1019     int64_t ti;
1020     ti = av_gettime();
1021     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1022 }
1023
1024 /* get the current master clock value */
1025 static double get_master_clock(VideoState *is)
1026 {
1027     double val;
1028
1029     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1030         if (is->video_st)
1031             val = get_video_clock(is);
1032         else
1033             val = get_audio_clock(is);
1034     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1035         if (is->audio_st)
1036             val = get_audio_clock(is);
1037         else
1038             val = get_video_clock(is);
1039     } else {
1040         val = get_external_clock(is);
1041     }
1042     return val;
1043 }
1044
1045 /* seek in the stream */
1046 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1047 {
1048     if (!is->seek_req) {
1049         is->seek_pos = pos;
1050         is->seek_rel = rel;
1051         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1052         if (seek_by_bytes)
1053             is->seek_flags |= AVSEEK_FLAG_BYTE;
1054         is->seek_req = 1;
1055     }
1056 }
1057
1058 /* pause or resume the video */
1059 static void stream_toggle_pause(VideoState *is)
1060 {
1061     if (is->paused) {
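        /* on resume: advance frame_timer by the wall-clock time spent paused and refresh the video clock drift */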
1062         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1063         if(is->read_pause_return != AVERROR(ENOSYS)){
1064             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1065         }
1066         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1067     }
1068     is->paused = !is->paused;
1069 }
1070
1071 static double compute_target_time(double frame_current_pts, VideoState *is)
1072 {
1073     double delay, sync_threshold, diff;
1074
1075     /* compute nominal delay */
1076     delay = frame_current_pts - is->frame_last_pts;
1077     if (delay <= 0 || delay >= 10.0) {
1078         /* if incorrect delay, use previous one */
1079         delay = is->frame_last_delay;
1080     } else {
1081         is->frame_last_delay = delay;
1082     }
1083     is->frame_last_pts = frame_current_pts;
1084
1085     /* update delay to follow master synchronisation source */
1086     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1087          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1088         /* if video is slave, we try to correct big delays by
1089            duplicating or deleting a frame */
1090         diff = get_video_clock(is) - get_master_clock(is);
1091
1092         /* skip or repeat frame. We take into account the
1093            delay to compute the threshold. I still don't know
1094            if it is the best guess */
1095         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1096         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1097             if (diff <= -sync_threshold)
1098                 delay = 0;
1099             else if (diff >= sync_threshold)
1100                 delay = 2 * delay;
1101         }
1102     }
1103     is->frame_timer += delay;
1104
1105     av_dlog(NULL, "video: delay=%0.3f pts=%0.3f A-V=%f\n",
1106             delay, frame_current_pts, -diff);
1107
1108     return is->frame_timer;
1109 }
1110
1111 /* called to display each frame */
1112 static void video_refresh(void *opaque)
1113 {
1114     VideoState *is = opaque;
1115     VideoPicture *vp;
1116
1117     SubPicture *sp, *sp2;
1118
1119     if (is->video_st) {
1120 retry:
1121         if (is->pictq_size == 0) {
1122             // nothing to do, no picture to display in the queue
1123         } else {
1124             double time= av_gettime()/1000000.0;
1125             double next_target;
1126             /* dequeue the picture */
1127             vp = &is->pictq[is->pictq_rindex];
1128
1129             if(time < vp->target_clock)
1130                 return;
1131             /* update current video pts */
1132             is->video_current_pts = vp->pts;
1133             is->video_current_pts_drift = is->video_current_pts - time;
1134             is->video_current_pos = vp->pos;
1135             if(is->pictq_size > 1){
1136                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1137                 assert(nextvp->target_clock >= vp->target_clock);
1138                 next_target= nextvp->target_clock;
1139             }else{
1140                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1141             }
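            /* the frame is late: grow the frame-skip estimate, and drop this picture if another one
               is already queued or we are more than 0.5s behind */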
1142             if(framedrop && time > next_target){
1143                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1144                 if(is->pictq_size > 1 || time > next_target + 0.5){
1145                     /* update queue size and signal for next picture */
1146                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1147                         is->pictq_rindex = 0;
1148
1149                     SDL_LockMutex(is->pictq_mutex);
1150                     is->pictq_size--;
1151                     SDL_CondSignal(is->pictq_cond);
1152                     SDL_UnlockMutex(is->pictq_mutex);
1153                     goto retry;
1154                 }
1155             }
1156
1157             if(is->subtitle_st) {
1158                 if (is->subtitle_stream_changed) {
1159                     SDL_LockMutex(is->subpq_mutex);
1160
1161                     while (is->subpq_size) {
1162                         free_subpicture(&is->subpq[is->subpq_rindex]);
1163
1164                         /* update queue size and signal for next picture */
1165                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1166                             is->subpq_rindex = 0;
1167
1168                         is->subpq_size--;
1169                     }
1170                     is->subtitle_stream_changed = 0;
1171
1172                     SDL_CondSignal(is->subpq_cond);
1173                     SDL_UnlockMutex(is->subpq_mutex);
1174                 } else {
1175                     if (is->subpq_size > 0) {
1176                         sp = &is->subpq[is->subpq_rindex];
1177
1178                         if (is->subpq_size > 1)
1179                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1180                         else
1181                             sp2 = NULL;
1182
1183                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1184                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1185                         {
1186                             free_subpicture(sp);
1187
1188                             /* update queue size and signal for next picture */
1189                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1190                                 is->subpq_rindex = 0;
1191
1192                             SDL_LockMutex(is->subpq_mutex);
1193                             is->subpq_size--;
1194                             SDL_CondSignal(is->subpq_cond);
1195                             SDL_UnlockMutex(is->subpq_mutex);
1196                         }
1197                     }
1198                 }
1199             }
1200
1201             /* display picture */
1202             if (!display_disable)
1203                 video_display(is);
1204
1205             /* update queue size and signal for next picture */
1206             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1207                 is->pictq_rindex = 0;
1208
1209             SDL_LockMutex(is->pictq_mutex);
1210             is->pictq_size--;
1211             SDL_CondSignal(is->pictq_cond);
1212             SDL_UnlockMutex(is->pictq_mutex);
1213         }
1214     } else if (is->audio_st) {
1215         /* draw the next audio frame */
1216
1217         /* if there is only an audio stream, then display the audio bars
1218            (better than nothing, just to test the implementation) */
1219
1220         /* display picture */
1221         if (!display_disable)
1222             video_display(is);
1223     }
1224     if (show_status) {
1225         static int64_t last_time;
1226         int64_t cur_time;
1227         int aqsize, vqsize, sqsize;
1228         double av_diff;
1229
1230         cur_time = av_gettime();
1231         if (!last_time || (cur_time - last_time) >= 30000) {
1232             aqsize = 0;
1233             vqsize = 0;
1234             sqsize = 0;
1235             if (is->audio_st)
1236                 aqsize = is->audioq.size;
1237             if (is->video_st)
1238                 vqsize = is->videoq.size;
1239             if (is->subtitle_st)
1240                 sqsize = is->subtitleq.size;
1241             av_diff = 0;
1242             if (is->audio_st && is->video_st)
1243                 av_diff = get_audio_clock(is) - get_video_clock(is);
1244             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1245                    get_master_clock(is),
1246                    av_diff,
1247                    FFMAX(is->skip_frames-1, 0),
1248                    aqsize / 1024,
1249                    vqsize / 1024,
1250                    sqsize,
1251                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_dts : 0,
1252                    is->video_st ? is->video_st->codec->pts_correction_num_faulty_pts : 0);
1253             fflush(stdout);
1254             last_time = cur_time;
1255         }
1256     }
1257 }
1258
1259 /* allocate a picture (this needs to be done in the main thread to avoid
1260    potential locking problems) */
1261 static void alloc_picture(void *opaque)
1262 {
1263     VideoState *is = opaque;
1264     VideoPicture *vp;
1265
1266     vp = &is->pictq[is->pictq_windex];
1267
1268     if (vp->bmp)
1269         SDL_FreeYUVOverlay(vp->bmp);
1270
1271 #if CONFIG_AVFILTER
1272     if (vp->picref)
1273         avfilter_unref_buffer(vp->picref);
1274     vp->picref = NULL;
1275
1276     vp->width   = is->out_video_filter->inputs[0]->w;
1277     vp->height  = is->out_video_filter->inputs[0]->h;
1278     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1279 #else
1280     vp->width   = is->video_st->codec->width;
1281     vp->height  = is->video_st->codec->height;
1282     vp->pix_fmt = is->video_st->codec->pix_fmt;
1283 #endif
1284
1285     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1286                                    SDL_YV12_OVERLAY,
1287                                    screen);
1288     if (!vp->bmp || vp->bmp->pitches[0] < vp->width) {
1289         /* SDL allocates a buffer smaller than requested if the video
1290          * overlay hardware is unable to support the requested size. */
1291         fprintf(stderr, "Error: the video system does not support an image\n"
1292                         "size of %dx%d pixels. Try using -lowres or -vf \"scale=w:h\"\n"
1293                         "to reduce the image size.\n", vp->width, vp->height );
1294         do_exit();
1295     }
1296
1297     SDL_LockMutex(is->pictq_mutex);
1298     vp->allocated = 1;
1299     SDL_CondSignal(is->pictq_cond);
1300     SDL_UnlockMutex(is->pictq_mutex);
1301 }
1302
1303 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1304 {
1305     VideoPicture *vp;
1306     double frame_delay, pts = pts1;
1307
1308     /* compute the exact PTS for the picture if it is omitted in the stream
1309      * pts1 is the dts of the pkt / pts of the frame */
1310     if (pts != 0) {
1311         /* update video clock with pts, if present */
1312         is->video_clock = pts;
1313     } else {
1314         pts = is->video_clock;
1315     }
1316     /* update video clock for next frame */
1317     frame_delay = av_q2d(is->video_st->codec->time_base);
1318     /* for MPEG2, the frame can be repeated, so we update the
1319        clock accordingly */
1320     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1321     is->video_clock += frame_delay;
1322
1323 #if defined(DEBUG_SYNC) && 0
1324     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1325            av_get_picture_type_char(src_frame->pict_type), pts, pts1);
1326 #endif
1327
1328     /* wait until we have space to put a new picture */
1329     SDL_LockMutex(is->pictq_mutex);
1330
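    /* the picture queue is full and no refresh is pending: the decoder is running ahead, so decay the frame-skip estimate */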
1331     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1332         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1333
1334     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1335            !is->videoq.abort_request) {
1336         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1337     }
1338     SDL_UnlockMutex(is->pictq_mutex);
1339
1340     if (is->videoq.abort_request)
1341         return -1;
1342
1343     vp = &is->pictq[is->pictq_windex];
1344
1345     /* alloc or resize hardware picture buffer */
1346     if (!vp->bmp ||
1347 #if CONFIG_AVFILTER
1348         vp->width  != is->out_video_filter->inputs[0]->w ||
1349         vp->height != is->out_video_filter->inputs[0]->h) {
1350 #else
1351         vp->width != is->video_st->codec->width ||
1352         vp->height != is->video_st->codec->height) {
1353 #endif
1354         SDL_Event event;
1355
1356         vp->allocated = 0;
1357
1358         /* the allocation must be done in the main thread to avoid
1359            locking problems */
1360         event.type = FF_ALLOC_EVENT;
1361         event.user.data1 = is;
1362         SDL_PushEvent(&event);
1363
1364         /* wait until the picture is allocated */
1365         SDL_LockMutex(is->pictq_mutex);
1366         while (!vp->allocated && !is->videoq.abort_request) {
1367             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1368         }
1369         SDL_UnlockMutex(is->pictq_mutex);
1370
1371         if (is->videoq.abort_request)
1372             return -1;
1373     }
1374
1375     /* if the frame is not skipped, then display it */
1376     if (vp->bmp) {
1377         AVPicture pict;
1378 #if CONFIG_AVFILTER
1379         if(vp->picref)
1380             avfilter_unref_buffer(vp->picref);
1381         vp->picref = src_frame->opaque;
1382 #endif
1383
1384         /* get a pointer to the bitmap */
1385         SDL_LockYUVOverlay (vp->bmp);
1386
1387         memset(&pict,0,sizeof(AVPicture));
1388         pict.data[0] = vp->bmp->pixels[0];
1389         pict.data[1] = vp->bmp->pixels[2];
1390         pict.data[2] = vp->bmp->pixels[1];
1391
1392         pict.linesize[0] = vp->bmp->pitches[0];
1393         pict.linesize[1] = vp->bmp->pitches[2];
1394         pict.linesize[2] = vp->bmp->pitches[1];
1395
1396 #if CONFIG_AVFILTER
1397         //FIXME use direct rendering
1398         av_picture_copy(&pict, (AVPicture *)src_frame,
1399                         vp->pix_fmt, vp->width, vp->height);
1400 #else
1401         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1402         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1403             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1404             PIX_FMT_YUV420P, sws_flags, NULL, NULL, NULL);
1405         if (is->img_convert_ctx == NULL) {
1406             fprintf(stderr, "Cannot initialize the conversion context\n");
1407             exit(1);
1408         }
1409         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1410                   0, vp->height, pict.data, pict.linesize);
1411 #endif
1412         /* update the bitmap content */
1413         SDL_UnlockYUVOverlay(vp->bmp);
1414
1415         vp->pts = pts;
1416         vp->pos = pos;
1417
1418         /* now we can update the picture count */
1419         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1420             is->pictq_windex = 0;
1421         SDL_LockMutex(is->pictq_mutex);
1422         vp->target_clock= compute_target_time(vp->pts, is);
1423
1424         is->pictq_size++;
1425         SDL_UnlockMutex(is->pictq_mutex);
1426     }
1427     return 0;
1428 }
1429
1430 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1431 {
1432     int len1 av_unused, got_picture, i;
1433
1434     if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1435         return -1;
1436
1437     if (pkt->data == flush_pkt.data) {
1438         avcodec_flush_buffers(is->video_st->codec);
1439
1440         SDL_LockMutex(is->pictq_mutex);
1441         // Make sure there are no long-delay timers (ideally we should just flush the queue, but that's harder)
1442         for (i = 0; i < VIDEO_PICTURE_QUEUE_SIZE; i++) {
1443             is->pictq[i].target_clock= 0;
1444         }
1445         while (is->pictq_size && !is->videoq.abort_request) {
1446             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1447         }
1448         is->video_current_pos = -1;
1449         SDL_UnlockMutex(is->pictq_mutex);
1450
1451         is->frame_last_pts = AV_NOPTS_VALUE;
1452         is->frame_last_delay = 0;
1453         is->frame_timer = (double)av_gettime() / 1000000.0;
1454         is->skip_frames = 1;
1455         is->skip_frames_index = 0;
1456         return 0;
1457     }
1458
1459     len1 = avcodec_decode_video2(is->video_st->codec,
1460                                  frame, &got_picture,
1461                                  pkt);
1462
1463     if (got_picture) {
1464         if (decoder_reorder_pts == -1) {
1465             *pts = frame->best_effort_timestamp;
1466         } else if (decoder_reorder_pts) {
1467             *pts = frame->pkt_pts;
1468         } else {
1469             *pts = frame->pkt_dts;
1470         }
1471
1472         if (*pts == AV_NOPTS_VALUE) {
1473             *pts = 0;
1474         }
1475
1476         is->skip_frames_index += 1;
1477         if(is->skip_frames_index >= is->skip_frames){
1478             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1479             return 1;
1480         }
1481
1482     }
1483     return 0;
1484 }
1485
1486 #if CONFIG_AVFILTER
1487 typedef struct {
1488     VideoState *is;
1489     AVFrame *frame;
1490     int use_dr1;
1491 } FilterPriv;
1492
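/* get_buffer() callback installed on the video decoder: frames are decoded straight into buffers
   obtained from the filter chain, so they can be passed downstream without a copy (direct rendering) */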
1493 static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
1494 {
1495     AVFilterContext *ctx = codec->opaque;
1496     AVFilterBufferRef  *ref;
1497     int perms = AV_PERM_WRITE;
1498     int i, w, h, stride[4];
1499     unsigned edge;
1500     int pixel_size;
1501
1502     av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1503
1504     if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
1505         perms |= AV_PERM_NEG_LINESIZES;
1506
1507     if(pic->buffer_hints & FF_BUFFER_HINTS_VALID) {
1508         if(pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
1509         if(pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
1510         if(pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
1511     }
1512     if(pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;
1513
1514     w = codec->width;
1515     h = codec->height;
1516
1517     if(av_image_check_size(w, h, 0, codec))
1518         return -1;
1519
1520     avcodec_align_dimensions2(codec, &w, &h, stride);
1521     edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
1522     w += edge << 1;
1523     h += edge << 1;
1524
1525     if(!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
1526         return -1;
1527
1528     pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1+1;
1529     ref->video->w = codec->width;
1530     ref->video->h = codec->height;
1531     for(i = 0; i < 4; i ++) {
1532         unsigned hshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
1533         unsigned vshift = (i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
1534
1535         if (ref->data[i]) {
1536             ref->data[i]    += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);
1537         }
1538         pic->data[i]     = ref->data[i];
1539         pic->linesize[i] = ref->linesize[i];
1540     }
1541     pic->opaque = ref;
1542     pic->age    = INT_MAX;
1543     pic->type   = FF_BUFFER_TYPE_USER;
1544     pic->reordered_opaque = codec->reordered_opaque;
1545     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1546     else           pic->pkt_pts = AV_NOPTS_VALUE;
1547     return 0;
1548 }
1549
1550 static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
1551 {
1552     memset(pic->data, 0, sizeof(pic->data));
1553     avfilter_unref_buffer(pic->opaque);
1554 }
1555
1556 static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
1557 {
1558     AVFilterBufferRef *ref = pic->opaque;
1559
1560     if (pic->data[0] == NULL) {
1561         pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
1562         return codec->get_buffer(codec, pic);
1563     }
1564
1565     if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
1566         (codec->pix_fmt != ref->format)) {
1567         av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
1568         return -1;
1569     }
1570
1571     pic->reordered_opaque = codec->reordered_opaque;
1572     if(codec->pkt) pic->pkt_pts = codec->pkt->pts;
1573     else           pic->pkt_pts = AV_NOPTS_VALUE;
1574     return 0;
1575 }
1576
1577 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1578 {
1579     FilterPriv *priv = ctx->priv;
1580     AVCodecContext *codec;
1581     if(!opaque) return -1;
1582
1583     priv->is = opaque;
1584     codec    = priv->is->video_st->codec;
1585     codec->opaque = ctx;
1586     if (codec->codec->capabilities & CODEC_CAP_DR1) {
1588         av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
1589         priv->use_dr1 = 1;
1590         codec->get_buffer     = input_get_buffer;
1591         codec->release_buffer = input_release_buffer;
1592         codec->reget_buffer   = input_reget_buffer;
1593         codec->thread_safe_callbacks = 1;
1594     }
1595
1596     priv->frame = avcodec_alloc_frame();
1597
1598     return 0;
1599 }
1600
1601 static void input_uninit(AVFilterContext *ctx)
1602 {
1603     FilterPriv *priv = ctx->priv;
1604     av_free(priv->frame);
1605 }
1606
1607 static int input_request_frame(AVFilterLink *link)
1608 {
1609     FilterPriv *priv = link->src->priv;
1610     AVFilterBufferRef *picref;
1611     int64_t pts = 0;
1612     AVPacket pkt;
1613     int ret;
1614
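    /* keep pulling and decoding packets until a displayable frame is produced (ret == 1)
       or the packet queue is aborted (ret < 0) */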
1615     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1616         av_free_packet(&pkt);
1617     if (ret < 0)
1618         return -1;
1619
1620     if(priv->use_dr1 && priv->frame->opaque) {
1621         picref = avfilter_ref_buffer(priv->frame->opaque, ~0);
1622     } else {
1623         picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1624         av_image_copy(picref->data, picref->linesize,
1625                       priv->frame->data, priv->frame->linesize,
1626                       picref->format, link->w, link->h);
1627     }
1628     av_free_packet(&pkt);
1629
1630     avfilter_copy_frame_props(picref, priv->frame);
1631     picref->pts = pts;
1632
1633     avfilter_start_frame(link, picref);
1634     avfilter_draw_slice(link, 0, link->h, 1);
1635     avfilter_end_frame(link);
1636
1637     return 0;
1638 }
1639
1640 static int input_query_formats(AVFilterContext *ctx)
1641 {
1642     FilterPriv *priv = ctx->priv;
1643     enum PixelFormat pix_fmts[] = {
1644         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1645     };
1646
1647     avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list(pix_fmts));
1648     return 0;
1649 }
1650
1651 static int input_config_props(AVFilterLink *link)
1652 {
1653     FilterPriv *priv  = link->src->priv;
1654     AVCodecContext *c = priv->is->video_st->codec;
1655
1656     link->w = c->width;
1657     link->h = c->height;
1658     link->time_base = priv->is->video_st->time_base;
1659
1660     return 0;
1661 }
1662
1663 static AVFilter input_filter =
1664 {
1665     .name      = "ffplay_input",
1666
1667     .priv_size = sizeof(FilterPriv),
1668
1669     .init      = input_init,
1670     .uninit    = input_uninit,
1671
1672     .query_formats = input_query_formats,
1673
1674     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1675     .outputs   = (AVFilterPad[]) {{ .name = "default",
1676                                     .type = AVMEDIA_TYPE_VIDEO,
1677                                     .request_frame = input_request_frame,
1678                                     .config_props  = input_config_props, },
1679                                   { .name = NULL }},
1680 };
1681
1682 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
1683 {
1684     char sws_flags_str[128];
1685     int ret;
1686     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1687     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1688     snprintf(sws_flags_str, sizeof(sws_flags_str), "flags=%d", sws_flags);
1689     graph->scale_sws_opts = av_strdup(sws_flags_str);
1690
1691     if ((ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src",
1692                                             NULL, is, graph)) < 0)
1693         goto the_end;
1694     if ((ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"), "out",
1695                                             NULL, pix_fmts, graph)) < 0)
1696         goto the_end;
1697
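         /* wire the graph: with -vf the user chain is parsed in between our
          * source and the buffersink, attaching to the labels "in" (the
          * source's output) and "out" (the sink's input); e.g.
          * -vf "hflip,scale=640:-1" becomes src -> hflip -> scale -> out.
          * Without -vf, source and sink are linked directly. */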
1698     if(vfilters) {
1699         AVFilterInOut *outputs = avfilter_inout_alloc();
1700         AVFilterInOut *inputs  = avfilter_inout_alloc();
1701
1702         outputs->name    = av_strdup("in");
1703         outputs->filter_ctx = filt_src;
1704         outputs->pad_idx = 0;
1705         outputs->next    = NULL;
1706
1707         inputs->name    = av_strdup("out");
1708         inputs->filter_ctx = filt_out;
1709         inputs->pad_idx = 0;
1710         inputs->next    = NULL;
1711
1712         if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
1713             goto the_end;
1714         av_freep(&vfilters);
1715     } else {
1716         if ((ret = avfilter_link(filt_src, 0, filt_out, 0)) < 0)
1717             goto the_end;
1718     }
1719
1720     if ((ret = avfilter_graph_config(graph, NULL)) < 0)
1721         goto the_end;
1722
1723     is->out_video_filter = filt_out;
1724 the_end:
1725     return ret;
1726 }
1727
1728 #endif  /* CONFIG_AVFILTER */
1729
1730 static int video_thread(void *arg)
1731 {
1732     VideoState *is = arg;
1733     AVFrame *frame= avcodec_alloc_frame();
1734     int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
1735     double pts;
1736     int ret;
1737
1738 #if CONFIG_AVFILTER
1739     AVFilterGraph *graph = avfilter_graph_alloc();
1740     AVFilterContext *filt_out = NULL;
1741
1742     if ((ret = configure_video_filters(graph, is, vfilters)) < 0)
1743         goto the_end;
1744     filt_out = is->out_video_filter;
1745 #endif
1746
1747     for(;;) {
1748 #if !CONFIG_AVFILTER
1749         AVPacket pkt;
1750 #else
1751         AVFilterBufferRef *picref;
1752         AVRational tb = filt_out->inputs[0]->time_base;
1753 #endif
1754         while (is->paused && !is->videoq.abort_request)
1755             SDL_Delay(10);
1756 #if CONFIG_AVFILTER
1757         ret = av_vsink_buffer_get_video_buffer_ref(filt_out, &picref, 0);
1758         if (picref) {
1759             avfilter_fill_frame_from_video_buffer_ref(frame, picref);
1760             pts_int = picref->pts;
1761             pos     = picref->pos;
1762             frame->opaque = picref;
1763         }
1764
1765         if (av_cmp_q(tb, is->video_st->time_base)) {
1766             av_unused int64_t pts1 = pts_int;
1767             pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
1768             av_dlog(NULL, "video_thread(): "
1769                     "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
1770                     tb.num, tb.den, pts1,
1771                     is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
1772         }
1773 #else
1774         ret = get_video_frame(is, frame, &pts_int, &pkt);
1775         pos = pkt.pos;
1776         av_free_packet(&pkt);
1777 #endif
1778
1779         if (ret < 0) goto the_end;
1780
     #if CONFIG_AVFILTER
1781         if (!picref)
1782             continue;
     #endif
1783
1784         pts = pts_int*av_q2d(is->video_st->time_base);
1785
1786         ret = queue_picture(is, frame, pts, pos);
1787
1788         if (ret < 0)
1789             goto the_end;
1790
1791         if (step)
1792             if (cur_stream)
1793                 stream_toggle_pause(cur_stream);
1794     }
1795  the_end:
1796 #if CONFIG_AVFILTER
1797     avfilter_graph_free(&graph);
1798 #endif
1799     av_free(frame);
1800     return 0;
1801 }
1802
1803 static int subtitle_thread(void *arg)
1804 {
1805     VideoState *is = arg;
1806     SubPicture *sp;
1807     AVPacket pkt1, *pkt = &pkt1;
1808     int len1 av_unused, got_subtitle;
1809     double pts;
1810     int i, j;
1811     int r, g, b, y, u, v, a;
1812
1813     for(;;) {
1814         while (is->paused && !is->subtitleq.abort_request) {
1815             SDL_Delay(10);
1816         }
1817         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1818             break;
1819
1820         if(pkt->data == flush_pkt.data){
1821             avcodec_flush_buffers(is->subtitle_st->codec);
1822             continue;
1823         }
1824         SDL_LockMutex(is->subpq_mutex);
1825         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1826                !is->subtitleq.abort_request) {
1827             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1828         }
1829         SDL_UnlockMutex(is->subpq_mutex);
1830
1831         if (is->subtitleq.abort_request)
1832             goto the_end;
1833
1834         sp = &is->subpq[is->subpq_windex];
1835
1836         /* NOTE: pts is the PTS of the _first_ picture beginning in
1837            this packet, if any */
1838         pts = 0;
1839         if (pkt->pts != AV_NOPTS_VALUE)
1840             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1841
1842         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1843                                         &sp->sub, &got_subtitle,
1844                                         pkt);
1845         if (got_subtitle && sp->sub.format == 0) {
1846             sp->pts = pts;
1847
1848             for (i = 0; i < sp->sub.num_rects; i++)
1849             {
1850                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1851                 {
1852                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1853                     y = RGB_TO_Y_CCIR(r, g, b);
1854                     u = RGB_TO_U_CCIR(r, g, b, 0);
1855                     v = RGB_TO_V_CCIR(r, g, b, 0);
1856                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1857                 }
1858             }
1859
1860             /* now we can update the picture count */
1861             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1862                 is->subpq_windex = 0;
1863             SDL_LockMutex(is->subpq_mutex);
1864             is->subpq_size++;
1865             SDL_UnlockMutex(is->subpq_mutex);
1866         }
1867         av_free_packet(pkt);
1868     }
1869  the_end:
1870     return 0;
1871 }
1872
1873 /* copy samples into the ring buffer used by the waveform/RDFT display */
1874 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1875 {
1876     int size, len;
1877
1878     size = samples_size / sizeof(short);
1879     while (size > 0) {
1880         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1881         if (len > size)
1882             len = size;
1883         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1884         samples += len;
1885         is->sample_array_index += len;
1886         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1887             is->sample_array_index = 0;
1888         size -= len;
1889     }
1890 }
1891
1892 /* return the new audio buffer size (samples can be added or deleted
1893    to get better sync if the video or external clock is the master) */
1894 static int synchronize_audio(VideoState *is, short *samples,
1895                              int samples_size1, double pts)
1896 {
1897     int n, samples_size;
1898     double ref_clock;
1899
1900     n = 2 * is->audio_st->codec->channels;
1901     samples_size = samples_size1;
1902
1903     /* if not master, then we try to remove or add samples to correct the clock */
1904     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1905          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1906         double diff, avg_diff;
1907         int wanted_size, min_size, max_size, nb_samples;
1908
1909         ref_clock = get_master_clock(is);
1910         diff = get_audio_clock(is) - ref_clock;
1911
1912         if (diff < AV_NOSYNC_THRESHOLD) {
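             /* audio_diff_cum is an exponentially weighted sum of the A-V
              * differences (cum = diff + coef*cum), so the k-th most recent
              * measurement has weight coef^k; multiplying by (1 - coef)
              * below normalizes it into a running average dominated by
              * roughly the last AUDIO_DIFF_AVG_NB values, since coef is
              * exp(log(0.01)/AUDIO_DIFF_AVG_NB) and a value that far back
              * keeps only 1% of the newest one's weight */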
1913             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1914             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1915                 /* not enough measures to have a correct estimate */
1916                 is->audio_diff_avg_count++;
1917             } else {
1918                 /* estimate the A-V difference */
1919                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1920
1921                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
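                     /* desired buffer size: the clock error in seconds
                      * times the sample rate gives the number of sample
                      * frames to add or drop, times n bytes per frame;
                      * e.g. a 50 ms error at 44.1 kHz stereo S16 means
                      * 0.05 * 44100 * 4 = 8820 bytes.  The correction is
                      * clamped to SAMPLE_CORRECTION_PERCENT_MAX percent so
                      * the audible speed change stays small. */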
1922                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1923                     nb_samples = samples_size / n;
1924
1925                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1926                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1927                     if (wanted_size < min_size)
1928                         wanted_size = min_size;
1929                     else if (wanted_size > max_size)
1930                         wanted_size = max_size;
1931
1932                     /* add or remove samples to correct the sync */
1933                     if (wanted_size < samples_size) {
1934                         /* remove samples */
1935                         samples_size = wanted_size;
1936                     } else if (wanted_size > samples_size) {
1937                         uint8_t *samples_end, *q;
1938                         int nb;
1939
1940                         /* add samples */
1941                         nb = wanted_size - samples_size;
1942                         samples_end = (uint8_t *)samples + samples_size - n;
1943                         q = samples_end + n;
1944                         while (nb > 0) {
1945                             memcpy(q, samples_end, n);
1946                             q += n;
1947                             nb -= n;
1948                         }
1949                         samples_size = wanted_size;
1950                     }
1951                 }
1952 #if 0
1953                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1954                        diff, avg_diff, samples_size - samples_size1,
1955                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1956 #endif
1957             }
1958         } else {
1959             /* difference is too big: probably initial PTS errors, so
1960                reset the A-V filter */
1961             is->audio_diff_avg_count = 0;
1962             is->audio_diff_cum = 0;
1963         }
1964     }
1965
1966     return samples_size;
1967 }
1968
1969 /* decode one audio frame and return its uncompressed size */
1970 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1971 {
1972     AVPacket *pkt_temp = &is->audio_pkt_temp;
1973     AVPacket *pkt = &is->audio_pkt;
1974     AVCodecContext *dec= is->audio_st->codec;
1975     int n, len1, data_size;
1976     double pts;
1977
1978     for(;;) {
1979         /* NOTE: the audio packet can contain several frames */
1980         while (pkt_temp->size > 0) {
1981             data_size = sizeof(is->audio_buf1);
1982             len1 = avcodec_decode_audio3(dec,
1983                                         (int16_t *)is->audio_buf1, &data_size,
1984                                         pkt_temp);
1985             if (len1 < 0) {
1986                 /* if error, we skip the frame */
1987                 pkt_temp->size = 0;
1988                 break;
1989             }
1990
1991             pkt_temp->data += len1;
1992             pkt_temp->size -= len1;
1993             if (data_size <= 0)
1994                 continue;
1995
1996             if (dec->sample_fmt != is->audio_src_fmt) {
1997                 if (is->reformat_ctx)
1998                     av_audio_convert_free(is->reformat_ctx);
1999                 is->reformat_ctx= av_audio_convert_alloc(AV_SAMPLE_FMT_S16, 1,
2000                                                          dec->sample_fmt, 1, NULL, 0);
2001                 if (!is->reformat_ctx) {
2002                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
2003                         av_get_sample_fmt_name(dec->sample_fmt),
2004                         av_get_sample_fmt_name(AV_SAMPLE_FMT_S16));
2005                         break;
2006                 }
2007                 is->audio_src_fmt= dec->sample_fmt;
2008             }
2009
2010             if (is->reformat_ctx) {
2011                 const void *ibuf[6]= {is->audio_buf1};
2012                 void *obuf[6]= {is->audio_buf2};
2013                 int istride[6]= {av_get_bits_per_sample_fmt(dec->sample_fmt)/8};
2014                 int ostride[6]= {2};
2015                 int len= data_size/istride[0];
2016                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2017                     printf("av_audio_convert() failed\n");
2018                     break;
2019                 }
2020                 is->audio_buf= is->audio_buf2;
2021                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2022                           remove this legacy cruft */
2023                 data_size= len*2;
2024             }else{
2025                 is->audio_buf= is->audio_buf1;
2026             }
2027
2028             /* if no pts, then compute it */
2029             pts = is->audio_clock;
2030             *pts_ptr = pts;
2031             n = 2 * dec->channels;
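             /* advance the audio clock by the duration of the decoded data:
              * data_size / (channels * 2 bytes per sample * sample_rate)
              * seconds, e.g. 4096 bytes of stereo S16 at 44.1 kHz is
              * 4096 / (2*2*44100) = about 23 ms */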
2032             is->audio_clock += (double)data_size /
2033                 (double)(n * dec->sample_rate);
2034 #ifdef DEBUG
2035             {
2036                 static double last_clock;
2037                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2038                        is->audio_clock - last_clock,
2039                        is->audio_clock, pts);
2040                 last_clock = is->audio_clock;
2041             }
2042 #endif
2043             return data_size;
2044         }
2045
2046         /* free the current packet */
2047         if (pkt->data)
2048             av_free_packet(pkt);
2049
2050         if (is->paused || is->audioq.abort_request) {
2051             return -1;
2052         }
2053
2054         /* read next packet */
2055         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2056             return -1;
2057         if(pkt->data == flush_pkt.data){
2058             avcodec_flush_buffers(dec);
2059             continue;
2060         }
2061
2062         pkt_temp->data = pkt->data;
2063         pkt_temp->size = pkt->size;
2064
2065         /* update the audio clock with the packet pts, if available */
2066         if (pkt->pts != AV_NOPTS_VALUE) {
2067             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2068         }
2069     }
2070 }
2071
2072 /* prepare a new audio buffer */
2073 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2074 {
2075     VideoState *is = opaque;
2076     int audio_size, len1;
2077     double pts;
2078
2079     audio_callback_time = av_gettime();
2080
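         /* SDL pulls "len" bytes from its own audio thread: refill from the
          * decoder whenever our internal buffer runs dry, and substitute a
          * short block of silence on errors so output keeps flowing */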
2081     while (len > 0) {
2082         if (is->audio_buf_index >= is->audio_buf_size) {
2083             audio_size = audio_decode_frame(is, &pts);
2084             if (audio_size < 0) {
2085                 /* if error, just output silence */
2086                 is->audio_buf = is->audio_buf1;
2087                 is->audio_buf_size = 1024;
2088                 memset(is->audio_buf, 0, is->audio_buf_size);
2089             } else {
2090                 if (is->show_mode != SHOW_MODE_VIDEO)
2091                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2092                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2093                                                pts);
2094                 is->audio_buf_size = audio_size;
2095             }
2096             is->audio_buf_index = 0;
2097         }
2098         len1 = is->audio_buf_size - is->audio_buf_index;
2099         if (len1 > len)
2100             len1 = len;
2101         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2102         len -= len1;
2103         stream += len1;
2104         is->audio_buf_index += len1;
2105     }
2106 }
2107
2108 /* open a given stream. Return 0 if OK */
2109 static int stream_component_open(VideoState *is, int stream_index)
2110 {
2111     AVFormatContext *ic = is->ic;
2112     AVCodecContext *avctx;
2113     AVCodec *codec;
2114     SDL_AudioSpec wanted_spec, spec;
2115
2116     if (stream_index < 0 || stream_index >= ic->nb_streams)
2117         return -1;
2118     avctx = ic->streams[stream_index]->codec;
2119
2120     /* prepare audio output */
2121     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2122         if (avctx->channels > 0) {
2123             avctx->request_channels = FFMIN(2, avctx->channels);
2124         } else {
2125             avctx->request_channels = 2;
2126         }
2127     }
2128
2129     codec = avcodec_find_decoder(avctx->codec_id);
2130     if (!codec)
2131         return -1;
2132
2133     avctx->workaround_bugs = workaround_bugs;
2134     avctx->lowres = lowres;
2135     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2136     avctx->idct_algo= idct;
2137     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2138     avctx->skip_frame= skip_frame;
2139     avctx->skip_idct= skip_idct;
2140     avctx->skip_loop_filter= skip_loop_filter;
2141     avctx->error_recognition= error_recognition;
2142     avctx->error_concealment= error_concealment;
2143     avctx->thread_count= thread_count;
2144
2145     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0, codec);
2146
2147     if(codec->capabilities & CODEC_CAP_DR1)
2148         avctx->flags |= CODEC_FLAG_EMU_EDGE;
2149
2150     if (avcodec_open(avctx, codec) < 0)
2151         return -1;
2152
2153     /* prepare audio output */
2154     if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
2155         if(avctx->sample_rate <= 0 || avctx->channels <= 0){
2156             fprintf(stderr, "Invalid sample rate or channel count\n");
2157             return -1;
2158         }
2159         wanted_spec.freq = avctx->sample_rate;
2160         wanted_spec.format = AUDIO_S16SYS;
2161         wanted_spec.channels = avctx->channels;
2162         wanted_spec.silence = 0;
2163         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2164         wanted_spec.callback = sdl_audio_callback;
2165         wanted_spec.userdata = is;
2166         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2167             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2168             return -1;
2169         }
2170         is->audio_hw_buf_size = spec.size;
2171         is->audio_src_fmt= AV_SAMPLE_FMT_S16;
2172     }
2173
2174     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2175     switch(avctx->codec_type) {
2176     case AVMEDIA_TYPE_AUDIO:
2177         is->audio_stream = stream_index;
2178         is->audio_st = ic->streams[stream_index];
2179         is->audio_buf_size = 0;
2180         is->audio_buf_index = 0;
2181
2182         /* init averaging filter */
2183         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2184         is->audio_diff_avg_count = 0;
2185         /* since we do not have a precise enough audio FIFO fullness measure,
2186            we correct audio sync only if the error exceeds this threshold */
2187         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
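         /* i.e. the duration of two SDL audio buffers: with the default
          * 1024-sample buffer at 44.1 kHz this is about 46 ms */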
2188
2189         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2190         packet_queue_init(&is->audioq);
2191         SDL_PauseAudio(0);
2192         break;
2193     case AVMEDIA_TYPE_VIDEO:
2194         is->video_stream = stream_index;
2195         is->video_st = ic->streams[stream_index];
2196
2197         packet_queue_init(&is->videoq);
2198         is->video_tid = SDL_CreateThread(video_thread, is);
2199         break;
2200     case AVMEDIA_TYPE_SUBTITLE:
2201         is->subtitle_stream = stream_index;
2202         is->subtitle_st = ic->streams[stream_index];
2203         packet_queue_init(&is->subtitleq);
2204
2205         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2206         break;
2207     default:
2208         break;
2209     }
2210     return 0;
2211 }
2212
2213 static void stream_component_close(VideoState *is, int stream_index)
2214 {
2215     AVFormatContext *ic = is->ic;
2216     AVCodecContext *avctx;
2217
2218     if (stream_index < 0 || stream_index >= ic->nb_streams)
2219         return;
2220     avctx = ic->streams[stream_index]->codec;
2221
2222     switch(avctx->codec_type) {
2223     case AVMEDIA_TYPE_AUDIO:
2224         packet_queue_abort(&is->audioq);
2225
2226         SDL_CloseAudio();
2227
2228         packet_queue_end(&is->audioq);
2229         if (is->reformat_ctx)
2230             av_audio_convert_free(is->reformat_ctx);
2231         is->reformat_ctx = NULL;
2232         break;
2233     case AVMEDIA_TYPE_VIDEO:
2234         packet_queue_abort(&is->videoq);
2235
2236         /* note: we also signal this mutex to make sure we unblock the
2237            video thread in all cases */
2238         SDL_LockMutex(is->pictq_mutex);
2239         SDL_CondSignal(is->pictq_cond);
2240         SDL_UnlockMutex(is->pictq_mutex);
2241
2242         SDL_WaitThread(is->video_tid, NULL);
2243
2244         packet_queue_end(&is->videoq);
2245         break;
2246     case AVMEDIA_TYPE_SUBTITLE:
2247         packet_queue_abort(&is->subtitleq);
2248
2249         /* note: we also signal this mutex to make sure we unblock the
2250            subtitle thread in all cases */
2251         SDL_LockMutex(is->subpq_mutex);
2252         is->subtitle_stream_changed = 1;
2253
2254         SDL_CondSignal(is->subpq_cond);
2255         SDL_UnlockMutex(is->subpq_mutex);
2256
2257         SDL_WaitThread(is->subtitle_tid, NULL);
2258
2259         packet_queue_end(&is->subtitleq);
2260         break;
2261     default:
2262         break;
2263     }
2264
2265     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2266     avcodec_close(avctx);
2267     switch(avctx->codec_type) {
2268     case AVMEDIA_TYPE_AUDIO:
2269         is->audio_st = NULL;
2270         is->audio_stream = -1;
2271         break;
2272     case AVMEDIA_TYPE_VIDEO:
2273         is->video_st = NULL;
2274         is->video_stream = -1;
2275         break;
2276     case AVMEDIA_TYPE_SUBTITLE:
2277         is->subtitle_st = NULL;
2278         is->subtitle_stream = -1;
2279         break;
2280     default:
2281         break;
2282     }
2283 }
2284
2285 /* since we have only one decoding thread, we can use a global
2286    variable instead of a thread local variable */
2287 static VideoState *global_video_state;
2288
2289 static int decode_interrupt_cb(void)
2290 {
2291     return (global_video_state && global_video_state->abort_request);
2292 }
2293
2294 /* this thread gets the stream from the disk or the network */
2295 static int read_thread(void *arg)
2296 {
2297     VideoState *is = arg;
2298     AVFormatContext *ic;
2299     int err, i, ret;
2300     int st_index[AVMEDIA_TYPE_NB];
2301     AVPacket pkt1, *pkt = &pkt1;
2302     AVFormatParameters params, *ap = &params;
2303     int eof=0;
2304     int pkt_in_play_range = 0;
2305
2306     ic = avformat_alloc_context();
2307
2308     memset(st_index, -1, sizeof(st_index));
2309     is->video_stream = -1;
2310     is->audio_stream = -1;
2311     is->subtitle_stream = -1;
2312
2313     global_video_state = is;
2314     avio_set_interrupt_cb(decode_interrupt_cb);
2315
2316     memset(ap, 0, sizeof(*ap));
2317
2318     ap->prealloced_context = 1;
2319     ap->width = frame_width;
2320     ap->height= frame_height;
2321     ap->time_base= (AVRational){1, 25};
2322     ap->pix_fmt = frame_pix_fmt;
2323     ic->flags |= AVFMT_FLAG_PRIV_OPT;
2324
2325
2326     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2327     if (err >= 0) {
2328         set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM, NULL);
2329         err = av_demuxer_open(ic, ap);
2330         if(err < 0){
2331             avformat_free_context(ic);
2332             ic= NULL;
2333         }
2334     }
2335     if (err < 0) {
2336         print_error(is->filename, err);
2337         ret = -1;
2338         goto fail;
2339     }
2340     is->ic = ic;
2341
2342     if(genpts)
2343         ic->flags |= AVFMT_FLAG_GENPTS;
2344
2345     err = av_find_stream_info(ic);
2346     if (err < 0) {
2347         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2348         ret = -1;
2349         goto fail;
2350     }
2351     if(ic->pb)
2352         ic->pb->eof_reached= 0; //FIXME hack, ffplay should probably not use url_feof() to test for the end
2353
2354     if(seek_by_bytes<0)
2355         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2356
2357     /* if seeking was requested, execute it */
2358     if (start_time != AV_NOPTS_VALUE) {
2359         int64_t timestamp;
2360
2361         timestamp = start_time;
2362         /* add the stream start time */
2363         if (ic->start_time != AV_NOPTS_VALUE)
2364             timestamp += ic->start_time;
2365         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2366         if (ret < 0) {
2367             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2368                     is->filename, (double)timestamp / AV_TIME_BASE);
2369         }
2370     }
2371
2372     for (i = 0; i < ic->nb_streams; i++)
2373         ic->streams[i]->discard = AVDISCARD_ALL;
2374     if (!video_disable)
2375         st_index[AVMEDIA_TYPE_VIDEO] =
2376             av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2377                                 wanted_stream[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2378     if (!audio_disable)
2379         st_index[AVMEDIA_TYPE_AUDIO] =
2380             av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2381                                 wanted_stream[AVMEDIA_TYPE_AUDIO],
2382                                 st_index[AVMEDIA_TYPE_VIDEO],
2383                                 NULL, 0);
2384     if (!video_disable)
2385         st_index[AVMEDIA_TYPE_SUBTITLE] =
2386             av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2387                                 wanted_stream[AVMEDIA_TYPE_SUBTITLE],
2388                                 (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2389                                  st_index[AVMEDIA_TYPE_AUDIO] :
2390                                  st_index[AVMEDIA_TYPE_VIDEO]),
2391                                 NULL, 0);
2392     if (show_status) {
2393         av_dump_format(ic, 0, is->filename, 0);
2394     }
2395
2396     is->show_mode = show_mode;
2397
2398     /* open the streams */
2399     if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2400         stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2401     }
2402
2403     ret=-1;
2404     if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2405         ret= stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2406     }
2407     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2408     if (is->show_mode == SHOW_MODE_NONE)
2409         is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2410
2411     if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2412         stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2413     }
2414
2415     if (is->video_stream < 0 && is->audio_stream < 0) {
2416         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2417         ret = -1;
2418         goto fail;
2419     }
2420
2421     for(;;) {
2422         if (is->abort_request)
2423             break;
2424         if (is->paused != is->last_paused) {
2425             is->last_paused = is->paused;
2426             if (is->paused)
2427                 is->read_pause_return= av_read_pause(ic);
2428             else
2429                 av_read_play(ic);
2430         }
2431 #if CONFIG_RTSP_DEMUXER
2432         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2433             /* wait 10 ms to avoid trying to get another packet */
2434             /* XXX: horrible */
2435             SDL_Delay(10);
2436             continue;
2437         }
2438 #endif
2439         if (is->seek_req) {
2440             int64_t seek_target= is->seek_pos;
2441             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2442             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2443 //FIXME the +-2 is due to rounding not being done in the correct direction
2444 //      when generating the seek_pos/seek_rel variables
2445
2446             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2447             if (ret < 0) {
2448                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2449             }else{
2450                 if (is->audio_stream >= 0) {
2451                     packet_queue_flush(&is->audioq);
2452                     packet_queue_put(&is->audioq, &flush_pkt);
2453                 }
2454                 if (is->subtitle_stream >= 0) {
2455                     packet_queue_flush(&is->subtitleq);
2456                     packet_queue_put(&is->subtitleq, &flush_pkt);
2457                 }
2458                 if (is->video_stream >= 0) {
2459                     packet_queue_flush(&is->videoq);
2460                     packet_queue_put(&is->videoq, &flush_pkt);
2461                 }
2462             }
2463             is->seek_req = 0;
2464             eof= 0;
2465         }
2466
2467         /* if the queues are full, no need to read more */
2468         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2469             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2470                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2471                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2472             /* wait 10 ms */
2473             SDL_Delay(10);
2474             continue;
2475         }
2476         if(eof) {
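             /* queue an empty packet so the video decoder returns the
              * frames it still buffers (decoders with CODEC_CAP_DELAY only
              * flush them when fed a zero-sized packet) */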
2477             if(is->video_stream >= 0){
2478                 av_init_packet(pkt);
2479                 pkt->data=NULL;
2480                 pkt->size=0;
2481                 pkt->stream_index= is->video_stream;
2482                 packet_queue_put(&is->videoq, pkt);
2483             }
2484             SDL_Delay(10);
2485             if(is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2486                 if(loop!=1 && (!loop || --loop)){
2487                     stream_seek(cur_stream, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2488                 }else if(autoexit){
2489                     ret=AVERROR_EOF;
2490                     goto fail;
2491                 }
2492             }
2493             eof=0;
2494             continue;
2495         }
2496         ret = av_read_frame(ic, pkt);
2497         if (ret < 0) {
2498             if (ret == AVERROR_EOF || url_feof(ic->pb))
2499                 eof=1;
2500             if (ic->pb && ic->pb->error)
2501                 break;
2502             SDL_Delay(100); /* wait for user event */
2503             continue;
2504         }
2505         /* check if packet is in play range specified by user, then queue, otherwise discard */
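         /* a packet is in range when its timestamp, converted to seconds
          * relative to the stream start and minus the -ss start offset,
          * does not exceed the -t duration (always true if no -t given) */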
2506         pkt_in_play_range = duration == AV_NOPTS_VALUE ||
2507                 (pkt->pts - ic->streams[pkt->stream_index]->start_time) *
2508                 av_q2d(ic->streams[pkt->stream_index]->time_base) -
2509                 (double)(start_time != AV_NOPTS_VALUE ? start_time : 0)/1000000
2510                 <= ((double)duration/1000000);
2511         if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
2512             packet_queue_put(&is->audioq, pkt);
2513         } else if (pkt->stream_index == is->video_stream && pkt_in_play_range) {
2514             packet_queue_put(&is->videoq, pkt);
2515         } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
2516             packet_queue_put(&is->subtitleq, pkt);
2517         } else {
2518             av_free_packet(pkt);
2519         }
2520     }
2521     /* wait until the end */
2522     while (!is->abort_request) {
2523         SDL_Delay(100);
2524     }
2525
2526     ret = 0;
2527  fail:
2528     /* disable interrupting */
2529     global_video_state = NULL;
2530
2531     /* close each stream */
2532     if (is->audio_stream >= 0)
2533         stream_component_close(is, is->audio_stream);
2534     if (is->video_stream >= 0)
2535         stream_component_close(is, is->video_stream);
2536     if (is->subtitle_stream >= 0)
2537         stream_component_close(is, is->subtitle_stream);
2538     if (is->ic) {
2539         av_close_input_file(is->ic);
2540         is->ic = NULL; /* safety */
2541     }
2542     avio_set_interrupt_cb(NULL);
2543
2544     if (ret != 0) {
2545         SDL_Event event;
2546
2547         event.type = FF_QUIT_EVENT;
2548         event.user.data1 = is;
2549         SDL_PushEvent(&event);
2550     }
2551     return 0;
2552 }
2553
2554 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2555 {
2556     VideoState *is;
2557
2558     is = av_mallocz(sizeof(VideoState));
2559     if (!is)
2560         return NULL;
2561     av_strlcpy(is->filename, filename, sizeof(is->filename));
2562     is->iformat = iformat;
2563     is->ytop = 0;
2564     is->xleft = 0;
2565
2566     /* start video display */
2567     is->pictq_mutex = SDL_CreateMutex();
2568     is->pictq_cond = SDL_CreateCond();
2569
2570     is->subpq_mutex = SDL_CreateMutex();
2571     is->subpq_cond = SDL_CreateCond();
2572
2573     is->av_sync_type = av_sync_type;
2574     is->read_tid = SDL_CreateThread(read_thread, is);
2575     if (!is->read_tid) {
2576         av_free(is);
2577         return NULL;
2578     }
2579     return is;
2580 }
2581
2582 static void stream_cycle_channel(VideoState *is, int codec_type)
2583 {
2584     AVFormatContext *ic = is->ic;
2585     int start_index, stream_index;
2586     AVStream *st;
2587
2588     if (codec_type == AVMEDIA_TYPE_VIDEO)
2589         start_index = is->video_stream;
2590     else if (codec_type == AVMEDIA_TYPE_AUDIO)
2591         start_index = is->audio_stream;
2592     else
2593         start_index = is->subtitle_stream;
2594     if (start_index < (codec_type == AVMEDIA_TYPE_SUBTITLE ? -1 : 0))
2595         return;
2596     stream_index = start_index;
2597     for(;;) {
2598         if (++stream_index >= is->ic->nb_streams)
2599         {
2600             if (codec_type == AVMEDIA_TYPE_SUBTITLE)
2601             {
2602                 stream_index = -1;
2603                 goto the_end;
2604             } else
2605                 stream_index = 0;
2606         }
2607         if (stream_index == start_index)
2608             return;
2609         st = ic->streams[stream_index];
2610         if (st->codec->codec_type == codec_type) {
2611             /* check that parameters are OK */
2612             switch(codec_type) {
2613             case AVMEDIA_TYPE_AUDIO:
2614                 if (st->codec->sample_rate != 0 &&
2615                     st->codec->channels != 0)
2616                     goto the_end;
2617                 break;
2618             case AVMEDIA_TYPE_VIDEO:
2619             case AVMEDIA_TYPE_SUBTITLE:
2620                 goto the_end;
2621             default:
2622                 break;
2623             }
2624         }
2625     }
2626  the_end:
2627     stream_component_close(is, start_index);
2628     stream_component_open(is, stream_index);
2629 }
2630
2631
2632 static void toggle_full_screen(void)
2633 {
2634     is_full_screen = !is_full_screen;
2635     video_open(cur_stream);
2636 }
2637
2638 static void toggle_pause(void)
2639 {
2640     if (cur_stream)
2641         stream_toggle_pause(cur_stream);
2642     step = 0;
2643 }
2644
2645 static void step_to_next_frame(void)
2646 {
2647     if (cur_stream) {
2648         /* if the stream is paused unpause it, then step */
2649         if (cur_stream->paused)
2650             stream_toggle_pause(cur_stream);
2651     }
2652     step = 1;
2653 }
2654
2655 static void toggle_audio_display(void)
2656 {
2657     if (cur_stream) {
2658         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2659         cur_stream->show_mode = (cur_stream->show_mode + 1) % SHOW_MODE_NB;
2660         fill_rectangle(screen,
2661                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2662                     bgcolor);
2663         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2664     }
2665 }
2666
2667 /* handle an event sent by the GUI */
2668 static void event_loop(void)
2669 {
2670     SDL_Event event;
2671     double incr, pos, frac;
2672
2673     for(;;) {
2674         double x;
2675         SDL_WaitEvent(&event);
2676         switch(event.type) {
2677         case SDL_KEYDOWN:
2678             if (exit_on_keydown) {
2679                 do_exit();
2680                 break;
2681             }
2682             switch(event.key.keysym.sym) {
2683             case SDLK_ESCAPE:
2684             case SDLK_q:
2685                 do_exit();
2686                 break;
2687             case SDLK_f:
2688                 toggle_full_screen();
2689                 break;
2690             case SDLK_p:
2691             case SDLK_SPACE:
2692                 toggle_pause();
2693                 break;
2694             case SDLK_s: //S: Step to next frame
2695                 step_to_next_frame();
2696                 break;
2697             case SDLK_a:
2698                 if (cur_stream)
2699                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
2700                 break;
2701             case SDLK_v:
2702                 if (cur_stream)
2703                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
2704                 break;
2705             case SDLK_t:
2706                 if (cur_stream)
2707                     stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
2708                 break;
2709             case SDLK_w:
2710                 toggle_audio_display();
2711                 break;
2712             case SDLK_LEFT:
2713                 incr = -10.0;
2714                 goto do_seek;
2715             case SDLK_RIGHT:
2716                 incr = 10.0;
2717                 goto do_seek;
2718             case SDLK_UP:
2719                 incr = 60.0;
2720                 goto do_seek;
2721             case SDLK_DOWN:
2722                 incr = -60.0;
2723             do_seek:
2724                 if (cur_stream) {
2725                     if (seek_by_bytes) {
2726                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2727                             pos= cur_stream->video_current_pos;
2728                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2729                             pos= cur_stream->audio_pkt.pos;
2730                         }else
2731                             pos = avio_tell(cur_stream->ic->pb);
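                         /* convert the seek increment from seconds to bytes
                          * using the container bit rate (bit_rate/8 bytes
                          * per second); the 180000 bytes/s fallback matches
                          * a stream of roughly 1.4 Mbit/s */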
2732                         if (cur_stream->ic->bit_rate)
2733                             incr *= cur_stream->ic->bit_rate / 8.0;
2734                         else
2735                             incr *= 180000.0;
2736                         pos += incr;
2737                         stream_seek(cur_stream, pos, incr, 1);
2738                     } else {
2739                         pos = get_master_clock(cur_stream);
2740                         pos += incr;
2741                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2742                     }
2743                 }
2744                 break;
2745             default:
2746                 break;
2747             }
2748             break;
2749         case SDL_MOUSEBUTTONDOWN:
2750             if (exit_on_mousedown) {
2751                 do_exit();
2752                 break;
2753             }
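             /* no break: fall through so a mouse click is handled by the
              * same seek code as mouse motion below */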
2754         case SDL_MOUSEMOTION:
2755             if(event.type ==SDL_MOUSEBUTTONDOWN){
2756                 x= event.button.x;
2757             }else{
2758                 if(event.motion.state != SDL_PRESSED)
2759                     break;
2760                 x= event.motion.x;
2761             }
2762             if (cur_stream) {
2763                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2764                     uint64_t size=  avio_size(cur_stream->ic->pb);
2765                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2766                 }else{
2767                     int64_t ts;
2768                     int ns, hh, mm, ss;
2769                     int tns, thh, tmm, tss;
2770                     tns = cur_stream->ic->duration/1000000LL;
2771                     thh = tns/3600;
2772                     tmm = (tns%3600)/60;
2773                     tss = (tns%60);
2774                     frac = x/cur_stream->width;
2775                     ns = frac*tns;
2776                     hh = ns/3600;
2777                     mm = (ns%3600)/60;
2778                     ss = (ns%60);
2779                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2780                             hh, mm, ss, thh, tmm, tss);
2781                     ts = frac*cur_stream->ic->duration;
2782                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2783                         ts += cur_stream->ic->start_time;
2784                     stream_seek(cur_stream, ts, 0, 0);
2785                 }
2786             }
2787             break;
2788         case SDL_VIDEORESIZE:
2789             if (cur_stream) {
2790                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2791                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2792                 screen_width = cur_stream->width = event.resize.w;
2793                 screen_height= cur_stream->height= event.resize.h;
2794             }
2795             break;
2796         case SDL_QUIT:
2797         case FF_QUIT_EVENT:
2798             do_exit();
2799             break;
2800         case FF_ALLOC_EVENT:
2801             video_open(event.user.data1);
2802             alloc_picture(event.user.data1);
2803             break;
2804         case FF_REFRESH_EVENT:
2805             video_refresh(event.user.data1);
2806             cur_stream->refresh=0;
2807             break;
2808         default:
2809             break;
2810         }
2811     }
2812 }
2813
2814 static int opt_frame_size(const char *opt, const char *arg)
2815 {
2816     if (av_parse_video_size(&frame_width, &frame_height, arg) < 0) {
2817         fprintf(stderr, "Incorrect frame size\n");
2818         return AVERROR(EINVAL);
2819     }
2820     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2821         fprintf(stderr, "Frame size must be a multiple of 2\n");
2822         return AVERROR(EINVAL);
2823     }
2824     return 0;
2825 }
2826
2827 static int opt_width(const char *opt, const char *arg)
2828 {
2829     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2830     return 0;
2831 }
2832
2833 static int opt_height(const char *opt, const char *arg)
2834 {
2835     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2836     return 0;
2837 }
2838
2839 static int opt_format(const char *opt, const char *arg)
2840 {
2841     file_iformat = av_find_input_format(arg);
2842     if (!file_iformat) {
2843         fprintf(stderr, "Unknown input format: %s\n", arg);
2844         return AVERROR(EINVAL);
2845     }
2846     return 0;
2847 }
2848
2849 static int opt_frame_pix_fmt(const char *opt, const char *arg)
2850 {
2851     frame_pix_fmt = av_get_pix_fmt(arg);
2852     return 0;
2853 }
2854
2855 static int opt_sync(const char *opt, const char *arg)
2856 {
2857     if (!strcmp(arg, "audio"))
2858         av_sync_type = AV_SYNC_AUDIO_MASTER;
2859     else if (!strcmp(arg, "video"))
2860         av_sync_type = AV_SYNC_VIDEO_MASTER;
2861     else if (!strcmp(arg, "ext"))
2862         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2863     else {
2864         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2865         exit(1);
2866     }
2867     return 0;
2868 }
2869
2870 static int opt_seek(const char *opt, const char *arg)
2871 {
2872     start_time = parse_time_or_die(opt, arg, 1);
2873     return 0;
2874 }
2875
2876 static int opt_duration(const char *opt, const char *arg)
2877 {
2878     duration = parse_time_or_die(opt, arg, 1);
2879     return 0;
2880 }
2881
2882 static int opt_thread_count(const char *opt, const char *arg)
2883 {
2884     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2885 #if !HAVE_THREADS
2886     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2887 #endif
2888     return 0;
2889 }
2890
2891 static int opt_show_mode(const char *opt, const char *arg)
2892 {
2893     show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
2894                 !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
2895                 !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT  :
2896                 parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
2897     return 0;
2898 }
2899
2900 static int opt_input_file(const char *opt, const char *filename)
2901 {
2902     if (input_filename) {
2903         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2904                 filename, input_filename);
2905         exit(1);
2906     }
2907     if (!strcmp(filename, "-"))
2908         filename = "pipe:";
2909     input_filename = filename;
2910     return 0;
2911 }
2912
2913 static const OptionDef options[] = {
2914 #include "cmdutils_common_opts.h"
2915     { "x", HAS_ARG, {(void*)opt_width}, "force displayed width", "width" },
2916     { "y", HAS_ARG, {(void*)opt_height}, "force displayed height", "height" },
2917     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2918     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2919     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2920     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2921     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2922     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2923     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[AVMEDIA_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2924     { "ss", HAS_ARG, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2925     { "t", HAS_ARG, {(void*)&opt_duration}, "play \"duration\" seconds of audio/video", "duration" },
2926     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2927     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2928     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2929     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2930     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2931     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "work around bugs", "" },
2932     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "enable non-spec-compliant optimizations", "" },
2933     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2934     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2935     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2936     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2937     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2938     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2939     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2940     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2941     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2942     { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2943     { "threads", HAS_ARG | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2944     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2945     { "exitonkeydown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_keydown}, "exit on key down", "" },
2946     { "exitonmousedown", OPT_BOOL | OPT_EXPERT, {(void*)&exit_on_mousedown}, "exit on mouse down", "" },
2947     { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&loop}, "set number of times the playback shall be looped", "loop count" },
2948     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2949     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2950 #if CONFIG_AVFILTER
2951     { "vf", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2952 #endif
2953     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2954     { "showmode", HAS_ARG, {(void*)opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
2955     { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2956     { "i", HAS_ARG, {(void *)opt_input_file}, "read specified file", "input_file"},
2957     { NULL, },
2958 };
2959
2960 static void show_usage(void)
2961 {
2962     printf("Simple media player\n");
2963     printf("usage: ffplay [options] input_file\n");
2964     printf("\n");
2965 }
2966
2967 static void show_help(void)
2968 {
2969     av_log_set_callback(log_callback_help);
2970     show_usage();
2971     show_help_options(options, "Main options:\n",
2972                       OPT_EXPERT, 0);
2973     show_help_options(options, "\nAdvanced options:\n",
2974                       OPT_EXPERT, OPT_EXPERT);
2975     printf("\n");
2976     av_opt_show2(avcodec_opts[0], NULL,
2977                  AV_OPT_FLAG_DECODING_PARAM, 0);
2978     printf("\n");
2979     av_opt_show2(avformat_opts, NULL,
2980                  AV_OPT_FLAG_DECODING_PARAM, 0);
2981 #if !CONFIG_AVFILTER
2982     printf("\n");
2983     av_opt_show2(sws_opts, NULL,
2984                  AV_OPT_FLAG_ENCODING_PARAM, 0);
2985 #endif
2986     printf("\nWhile playing:\n"
2987            "q, ESC              quit\n"
2988            "f                   toggle full screen\n"
2989            "p, SPC              pause\n"
2990            "a                   cycle audio channel\n"
2991            "v                   cycle video channel\n"
2992            "t                   cycle subtitle channel\n"
2993            "w                   show audio waves\n"
2994            "s                   activate frame-step mode\n"
2995            "left/right          seek backward/forward 10 seconds\n"
2996            "down/up             seek backward/forward 1 minute\n"
2997            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2998            );
2999 }
3000
3001 /* Called from the main */
3002 int main(int argc, char **argv)
3003 {
3004     int flags;
3005
3006     av_log_set_flags(AV_LOG_SKIP_REPEATED);
3007
3008     /* register all codecs, demuxers and protocols */
3009     avcodec_register_all();
3010 #if CONFIG_AVDEVICE
3011     avdevice_register_all();
3012 #endif
3013 #if CONFIG_AVFILTER
3014     avfilter_register_all();
3015 #endif
3016     av_register_all();
3017
3018     init_opts();
3019
3020     show_banner();
3021
3022     parse_options(argc, argv, options, opt_input_file);
3023
3024     if (!input_filename) {
3025         show_usage();
3026         fprintf(stderr, "An input file must be specified\n");
3027         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3028         exit(1);
3029     }
3030
3031     if (display_disable) {
3032         video_disable = 1;
3033     }
3034     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3035 #if !defined(__MINGW32__) && !defined(__APPLE__)
3036     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3037 #endif
3038     if (SDL_Init (flags)) {
3039         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3040         exit(1);
3041     }
3042
3043     if (!display_disable) {
3044 #if HAVE_SDL_VIDEO_SIZE
3045         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3046         fs_screen_width = vi->current_w;
3047         fs_screen_height = vi->current_h;
3048 #endif
3049     }
3050
3051     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3052     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3053     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3054
3055     av_init_packet(&flush_pkt);
3056     flush_pkt.data= "FLUSH";
3057
3058     cur_stream = stream_open(input_filename, file_iformat);
3059
3060     event_loop();
3061
3062     /* never returns */
3063
3064     return 0;
3065 }