Implement framedrop.
[ffmpeg.git] / ffplay.c
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "FFplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG_SYNC
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
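/* step by which the decoder frame-skip ratio grows when a frame misses its
   target display time, and by which it decays once the display keeps up again */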
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 #if !CONFIG_AVFILTER
84 static int sws_flags = SWS_BICUBIC;
85 #endif
86
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterPicRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139     int dtg_active_format;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     /* samples output by the codec. we reserve more space for avsync
156        compensation */
157     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     uint8_t *audio_buf;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum SampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166
167     int show_audio; /* if true, display audio samples */
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     int xpos;
174
175     SDL_Thread *subtitle_tid;
176     int subtitle_stream;
177     int subtitle_stream_changed;
178     AVStream *subtitle_st;
179     PacketQueue subtitleq;
180     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
181     int subpq_size, subpq_rindex, subpq_windex;
182     SDL_mutex *subpq_mutex;
183     SDL_cond *subpq_cond;
184
185     double frame_timer;
186     double frame_last_pts;
187     double frame_last_delay;
188     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
189     int video_stream;
190     AVStream *video_st;
191     PacketQueue videoq;
192     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
193     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
194     int64_t video_current_pos;                   ///<current displayed file pos
195     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
196     int pictq_size, pictq_rindex, pictq_windex;
197     SDL_mutex *pictq_mutex;
198     SDL_cond *pictq_cond;
199 #if !CONFIG_AVFILTER
200     struct SwsContext *img_convert_ctx;
201 #endif
202
203     //    QETimer *video_timer;
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207     int64_t faulty_pts;
208     int64_t faulty_dts;
209     int64_t last_dts_for_fault_detection;
210     int64_t last_pts_for_fault_detection;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
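    /* decoder-side frame dropping: skip_frames is the current ratio of decoded
       frames per displayed frame (>1 means frames are being dropped) and
       skip_frames_index is the running counter used in get_video_frame();
       refresh is set by refresh_thread() while a FF_REFRESH_EVENT is pending,
       so that at most one such event is queued at a time */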
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static void show_help(void);
222 static int audio_write_get_buf_size(VideoState *is);
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static int fs_screen_width;
228 static int fs_screen_height;
229 static int screen_width = 0;
230 static int screen_height = 0;
231 static int frame_width = 0;
232 static int frame_height = 0;
233 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
234 static int audio_disable;
235 static int video_disable;
236 static int wanted_stream[CODEC_TYPE_NB]={
237     [CODEC_TYPE_AUDIO]=-1,
238     [CODEC_TYPE_VIDEO]=-1,
239     [CODEC_TYPE_SUBTITLE]=-1,
240 };
241 static int seek_by_bytes=-1;
242 static int display_disable;
243 static int show_status = 1;
244 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
245 static int64_t start_time = AV_NOPTS_VALUE;
246 static int debug = 0;
247 static int debug_mv = 0;
248 static int step = 0;
249 static int thread_count = 1;
250 static int workaround_bugs = 1;
251 static int fast = 0;
252 static int genpts = 0;
253 static int lowres = 0;
254 static int idct = FF_IDCT_AUTO;
255 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
256 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
258 static int error_recognition = FF_ER_CAREFUL;
259 static int error_concealment = 3;
260 static int decoder_reorder_pts= -1;
261 static int autoexit;
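/* when set, drop video frames that are already past their target display time
   (see video_refresh_timer() and the FRAME_SKIP_FACTOR based skip_frames logic) */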
262 static int framedrop=1;
263 #if CONFIG_AVFILTER
264 static char *vfilters = NULL;
265 #endif
266
267 /* current context */
268 static int is_full_screen;
269 static VideoState *cur_stream;
270 static int64_t audio_callback_time;
271
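/* sentinel packet put on the packet queues (at init and after seeks) to tell
   the decoder threads to flush their codec buffers, see get_video_frame() */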
272 static AVPacket flush_pkt;
273
274 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
275 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
276 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
277
278 static SDL_Surface *screen;
279
280 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
281
282 /* packet queue handling */
283 static void packet_queue_init(PacketQueue *q)
284 {
285     memset(q, 0, sizeof(PacketQueue));
286     q->mutex = SDL_CreateMutex();
287     q->cond = SDL_CreateCond();
288     packet_queue_put(q, &flush_pkt);
289 }
290
291 static void packet_queue_flush(PacketQueue *q)
292 {
293     AVPacketList *pkt, *pkt1;
294
295     SDL_LockMutex(q->mutex);
296     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
297         pkt1 = pkt->next;
298         av_free_packet(&pkt->pkt);
299         av_freep(&pkt);
300     }
301     q->last_pkt = NULL;
302     q->first_pkt = NULL;
303     q->nb_packets = 0;
304     q->size = 0;
305     SDL_UnlockMutex(q->mutex);
306 }
307
308 static void packet_queue_end(PacketQueue *q)
309 {
310     packet_queue_flush(q);
311     SDL_DestroyMutex(q->mutex);
312     SDL_DestroyCond(q->cond);
313 }
314
315 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
316 {
317     AVPacketList *pkt1;
318
319     /* duplicate the packet */
320     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
321         return -1;
322
323     pkt1 = av_malloc(sizeof(AVPacketList));
324     if (!pkt1)
325         return -1;
326     pkt1->pkt = *pkt;
327     pkt1->next = NULL;
328
329
330     SDL_LockMutex(q->mutex);
331
332     if (!q->last_pkt)
333
334         q->first_pkt = pkt1;
335     else
336         q->last_pkt->next = pkt1;
337     q->last_pkt = pkt1;
338     q->nb_packets++;
339     q->size += pkt1->pkt.size + sizeof(*pkt1);
340     /* XXX: should duplicate packet data in DV case */
341     SDL_CondSignal(q->cond);
342
343     SDL_UnlockMutex(q->mutex);
344     return 0;
345 }
346
347 static void packet_queue_abort(PacketQueue *q)
348 {
349     SDL_LockMutex(q->mutex);
350
351     q->abort_request = 1;
352
353     SDL_CondSignal(q->cond);
354
355     SDL_UnlockMutex(q->mutex);
356 }
357
358 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
359 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
360 {
361     AVPacketList *pkt1;
362     int ret;
363
364     SDL_LockMutex(q->mutex);
365
366     for(;;) {
367         if (q->abort_request) {
368             ret = -1;
369             break;
370         }
371
372         pkt1 = q->first_pkt;
373         if (pkt1) {
374             q->first_pkt = pkt1->next;
375             if (!q->first_pkt)
376                 q->last_pkt = NULL;
377             q->nb_packets--;
378             q->size -= pkt1->pkt.size + sizeof(*pkt1);
379             *pkt = pkt1->pkt;
380             av_free(pkt1);
381             ret = 1;
382             break;
383         } else if (!block) {
384             ret = 0;
385             break;
386         } else {
387             SDL_CondWait(q->cond, q->mutex);
388         }
389     }
390     SDL_UnlockMutex(q->mutex);
391     return ret;
392 }
393
394 static inline void fill_rectangle(SDL_Surface *screen,
395                                   int x, int y, int w, int h, int color)
396 {
397     SDL_Rect rect;
398     rect.x = x;
399     rect.y = y;
400     rect.w = w;
401     rect.h = h;
402     SDL_FillRect(screen, &rect, color);
403 }
404
405 #if 0
406 /* draw only the border of a rectangle */
407 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
408 {
409     int w1, w2, h1, h2;
410
411     /* fill the background */
412     w1 = x;
413     if (w1 < 0)
414         w1 = 0;
415     w2 = s->width - (x + w);
416     if (w2 < 0)
417         w2 = 0;
418     h1 = y;
419     if (h1 < 0)
420         h1 = 0;
421     h2 = s->height - (y + h);
422     if (h2 < 0)
423         h2 = 0;
424     fill_rectangle(screen,
425                    s->xleft, s->ytop,
426                    w1, s->height,
427                    color);
428     fill_rectangle(screen,
429                    s->xleft + s->width - w2, s->ytop,
430                    w2, s->height,
431                    color);
432     fill_rectangle(screen,
433                    s->xleft + w1, s->ytop,
434                    s->width - w1 - w2, h1,
435                    color);
436     fill_rectangle(screen,
437                    s->xleft + w1, s->ytop + s->height - h2,
438                    s->width - w1 - w2, h2,
439                    color);
440 }
441 #endif
442
443 #define ALPHA_BLEND(a, oldp, newp, s)\
444 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
445
446 #define RGBA_IN(r, g, b, a, s)\
447 {\
448     unsigned int v = ((const uint32_t *)(s))[0];\
449     a = (v >> 24) & 0xff;\
450     r = (v >> 16) & 0xff;\
451     g = (v >> 8) & 0xff;\
452     b = v & 0xff;\
453 }
454
455 #define YUVA_IN(y, u, v, a, s, pal)\
456 {\
457     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
458     a = (val >> 24) & 0xff;\
459     y = (val >> 16) & 0xff;\
460     u = (val >> 8) & 0xff;\
461     v = val & 0xff;\
462 }
463
464 #define YUVA_OUT(d, y, u, v, a)\
465 {\
466     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
467 }
468
469
470 #define BPP 1
471
472 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
473 {
474     int wrap, wrap3, width2, skip2;
475     int y, u, v, a, u1, v1, a1, w, h;
476     uint8_t *lum, *cb, *cr;
477     const uint8_t *p;
478     const uint32_t *pal;
479     int dstx, dsty, dstw, dsth;
480
481     dstw = av_clip(rect->w, 0, imgw);
482     dsth = av_clip(rect->h, 0, imgh);
483     dstx = av_clip(rect->x, 0, imgw - dstw);
484     dsty = av_clip(rect->y, 0, imgh - dsth);
485     lum = dst->data[0] + dsty * dst->linesize[0];
486     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
487     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
488
489     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
490     skip2 = dstx >> 1;
491     wrap = dst->linesize[0];
492     wrap3 = rect->pict.linesize[0];
493     p = rect->pict.data[0];
494     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
495
496     if (dsty & 1) {
497         lum += dstx;
498         cb += skip2;
499         cr += skip2;
500
501         if (dstx & 1) {
502             YUVA_IN(y, u, v, a, p, pal);
503             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
504             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
505             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
506             cb++;
507             cr++;
508             lum++;
509             p += BPP;
510         }
511         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
512             YUVA_IN(y, u, v, a, p, pal);
513             u1 = u;
514             v1 = v;
515             a1 = a;
516             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517
518             YUVA_IN(y, u, v, a, p + BPP, pal);
519             u1 += u;
520             v1 += v;
521             a1 += a;
522             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
523             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
524             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
525             cb++;
526             cr++;
527             p += 2 * BPP;
528             lum += 2;
529         }
530         if (w) {
531             YUVA_IN(y, u, v, a, p, pal);
532             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
533             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
534             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
535             p++;
536             lum++;
537         }
538         p += wrap3 - dstw * BPP;
539         lum += wrap - dstw - dstx;
540         cb += dst->linesize[1] - width2 - skip2;
541         cr += dst->linesize[2] - width2 - skip2;
542     }
543     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
544         lum += dstx;
545         cb += skip2;
546         cr += skip2;
547
548         if (dstx & 1) {
549             YUVA_IN(y, u, v, a, p, pal);
550             u1 = u;
551             v1 = v;
552             a1 = a;
553             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
554             p += wrap3;
555             lum += wrap;
556             YUVA_IN(y, u, v, a, p, pal);
557             u1 += u;
558             v1 += v;
559             a1 += a;
560             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
561             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
562             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
563             cb++;
564             cr++;
565             p += -wrap3 + BPP;
566             lum += -wrap + 1;
567         }
568         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
569             YUVA_IN(y, u, v, a, p, pal);
570             u1 = u;
571             v1 = v;
572             a1 = a;
573             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
574
575             YUVA_IN(y, u, v, a, p + BPP, pal);
576             u1 += u;
577             v1 += v;
578             a1 += a;
579             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
580             p += wrap3;
581             lum += wrap;
582
583             YUVA_IN(y, u, v, a, p, pal);
584             u1 += u;
585             v1 += v;
586             a1 += a;
587             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
588
589             YUVA_IN(y, u, v, a, p + BPP, pal);
590             u1 += u;
591             v1 += v;
592             a1 += a;
593             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
594
595             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
596             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
597
598             cb++;
599             cr++;
600             p += -wrap3 + 2 * BPP;
601             lum += -wrap + 2;
602         }
603         if (w) {
604             YUVA_IN(y, u, v, a, p, pal);
605             u1 = u;
606             v1 = v;
607             a1 = a;
608             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
609             p += wrap3;
610             lum += wrap;
611             YUVA_IN(y, u, v, a, p, pal);
612             u1 += u;
613             v1 += v;
614             a1 += a;
615             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
616             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
617             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
618             cb++;
619             cr++;
620             p += -wrap3 + BPP;
621             lum += -wrap + 1;
622         }
623         p += wrap3 + (wrap3 - dstw * BPP);
624         lum += wrap + (wrap - dstw - dstx);
625         cb += dst->linesize[1] - width2 - skip2;
626         cr += dst->linesize[2] - width2 - skip2;
627     }
628     /* handle odd height */
629     if (h) {
630         lum += dstx;
631         cb += skip2;
632         cr += skip2;
633
634         if (dstx & 1) {
635             YUVA_IN(y, u, v, a, p, pal);
636             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
637             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
638             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
639             cb++;
640             cr++;
641             lum++;
642             p += BPP;
643         }
644         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
645             YUVA_IN(y, u, v, a, p, pal);
646             u1 = u;
647             v1 = v;
648             a1 = a;
649             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
650
651             YUVA_IN(y, u, v, a, p + BPP, pal);
652             u1 += u;
653             v1 += v;
654             a1 += a;
655             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
656             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
657             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
658             cb++;
659             cr++;
660             p += 2 * BPP;
661             lum += 2;
662         }
663         if (w) {
664             YUVA_IN(y, u, v, a, p, pal);
665             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
666             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
667             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
668         }
669     }
670 }
671
672 static void free_subpicture(SubPicture *sp)
673 {
674     int i;
675
676     for (i = 0; i < sp->sub.num_rects; i++)
677     {
678         av_freep(&sp->sub.rects[i]->pict.data[0]);
679         av_freep(&sp->sub.rects[i]->pict.data[1]);
680         av_freep(&sp->sub.rects[i]);
681     }
682
683     av_free(sp->sub.rects);
684
685     memset(&sp->sub, 0, sizeof(AVSubtitle));
686 }
687
688 static void video_image_display(VideoState *is)
689 {
690     VideoPicture *vp;
691     SubPicture *sp;
692     AVPicture pict;
693     float aspect_ratio;
694     int width, height, x, y;
695     SDL_Rect rect;
696     int i;
697
698     vp = &is->pictq[is->pictq_rindex];
699     if (vp->bmp) {
700 #if CONFIG_AVFILTER
701          if (vp->picref->pixel_aspect.num == 0)
702              aspect_ratio = 0;
703          else
704              aspect_ratio = av_q2d(vp->picref->pixel_aspect);
705 #else
706
707         /* XXX: use variable in the frame */
708         if (is->video_st->sample_aspect_ratio.num)
709             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
710         else if (is->video_st->codec->sample_aspect_ratio.num)
711             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
712         else
713             aspect_ratio = 0;
714 #endif
715         if (aspect_ratio <= 0.0)
716             aspect_ratio = 1.0;
717         aspect_ratio *= (float)vp->width / (float)vp->height;
718         /* if an active format is indicated, then it overrides the
719            MPEG format */
720 #if 0
721         if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
722             is->dtg_active_format = is->video_st->codec->dtg_active_format;
723             printf("dtg_active_format=%d\n", is->dtg_active_format);
724         }
725 #endif
726 #if 0
727         switch(is->video_st->codec->dtg_active_format) {
728         case FF_DTG_AFD_SAME:
729         default:
730             /* nothing to do */
731             break;
732         case FF_DTG_AFD_4_3:
733             aspect_ratio = 4.0 / 3.0;
734             break;
735         case FF_DTG_AFD_16_9:
736             aspect_ratio = 16.0 / 9.0;
737             break;
738         case FF_DTG_AFD_14_9:
739             aspect_ratio = 14.0 / 9.0;
740             break;
741         case FF_DTG_AFD_4_3_SP_14_9:
742             aspect_ratio = 14.0 / 9.0;
743             break;
744         case FF_DTG_AFD_16_9_SP_14_9:
745             aspect_ratio = 14.0 / 9.0;
746             break;
747         case FF_DTG_AFD_SP_4_3:
748             aspect_ratio = 4.0 / 3.0;
749             break;
750         }
751 #endif
752
753         if (is->subtitle_st)
754         {
755             if (is->subpq_size > 0)
756             {
757                 sp = &is->subpq[is->subpq_rindex];
758
759                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
760                 {
761                     SDL_LockYUVOverlay (vp->bmp);
762
763                     pict.data[0] = vp->bmp->pixels[0];
764                     pict.data[1] = vp->bmp->pixels[2];
765                     pict.data[2] = vp->bmp->pixels[1];
766
767                     pict.linesize[0] = vp->bmp->pitches[0];
768                     pict.linesize[1] = vp->bmp->pitches[2];
769                     pict.linesize[2] = vp->bmp->pitches[1];
770
771                     for (i = 0; i < sp->sub.num_rects; i++)
772                         blend_subrect(&pict, sp->sub.rects[i],
773                                       vp->bmp->w, vp->bmp->h);
774
775                     SDL_UnlockYUVOverlay (vp->bmp);
776                 }
777             }
778         }
779
780
781         /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
782         height = is->height;
783         width = ((int)rint(height * aspect_ratio)) & ~1;
784         if (width > is->width) {
785             width = is->width;
786             height = ((int)rint(width / aspect_ratio)) & ~1;
787         }
788         x = (is->width - width) / 2;
789         y = (is->height - height) / 2;
790         if (!is->no_background) {
791             /* fill the background */
792             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
793         } else {
794             is->no_background = 0;
795         }
796         rect.x = is->xleft + x;
797         rect.y = is->ytop  + y;
798         rect.w = width;
799         rect.h = height;
800         SDL_DisplayYUVOverlay(vp->bmp, &rect);
801     } else {
802 #if 0
803         fill_rectangle(screen,
804                        is->xleft, is->ytop, is->width, is->height,
805                        QERGB(0x00, 0x00, 0x00));
806 #endif
807     }
808 }
809
810 static inline int compute_mod(int a, int b)
811 {
812     a = a % b;
813     if (a >= 0)
814         return a;
815     else
816         return a + b;
817 }
818
819 static void video_audio_display(VideoState *s)
820 {
821     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
822     int ch, channels, h, h2, bgcolor, fgcolor;
823     int64_t time_diff;
824     int rdft_bits, nb_freq;
825
826     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
827         ;
828     nb_freq= 1<<(rdft_bits-1);
829
830     /* compute display index : center on currently output samples */
831     channels = s->audio_st->codec->channels;
832     nb_display_channels = channels;
833     if (!s->paused) {
834         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
835         n = 2 * channels;
836         delay = audio_write_get_buf_size(s);
837         delay /= n;
838
839         /* to be more precise, we take into account the time spent since
840            the last buffer computation */
841         if (audio_callback_time) {
842             time_diff = av_gettime() - audio_callback_time;
843             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
844         }
845
846         delay += 2*data_used;
847         if (delay < data_used)
848             delay = data_used;
849
850         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
851         if(s->show_audio==1){
852             h= INT_MIN;
853             for(i=0; i<1000; i+=channels){
854                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
855                 int a= s->sample_array[idx];
856                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
857                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
858                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
859                 int score= a-d;
860                 if(h<score && (b^c)<0){
861                     h= score;
862                     i_start= idx;
863                 }
864             }
865         }
866
867         s->last_i_start = i_start;
868     } else {
869         i_start = s->last_i_start;
870     }
871
872     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
873     if(s->show_audio==1){
874         fill_rectangle(screen,
875                        s->xleft, s->ytop, s->width, s->height,
876                        bgcolor);
877
878         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
879
880         /* total height for one channel */
881         h = s->height / nb_display_channels;
882         /* graph height / 2 */
883         h2 = (h * 9) / 20;
884         for(ch = 0;ch < nb_display_channels; ch++) {
885             i = i_start + ch;
886             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
887             for(x = 0; x < s->width; x++) {
888                 y = (s->sample_array[i] * h2) >> 15;
889                 if (y < 0) {
890                     y = -y;
891                     ys = y1 - y;
892                 } else {
893                     ys = y1;
894                 }
895                 fill_rectangle(screen,
896                                s->xleft + x, ys, 1, y,
897                                fgcolor);
898                 i += channels;
899                 if (i >= SAMPLE_ARRAY_SIZE)
900                     i -= SAMPLE_ARRAY_SIZE;
901             }
902         }
903
904         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
905
906         for(ch = 1;ch < nb_display_channels; ch++) {
907             y = s->ytop + ch * h;
908             fill_rectangle(screen,
909                            s->xleft, y, s->width, 1,
910                            fgcolor);
911         }
912         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
913     }else{
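        /* show_audio == 2: draw one RDFT spectrum column per refresh and let
           it scroll horizontally across the window */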
914         nb_display_channels= FFMIN(nb_display_channels, 2);
915         if(rdft_bits != s->rdft_bits){
916             av_rdft_end(s->rdft);
917             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
918             s->rdft_bits= rdft_bits;
919         }
920         {
921             FFTSample data[2][2*nb_freq];
922             for(ch = 0;ch < nb_display_channels; ch++) {
923                 i = i_start + ch;
924                 for(x = 0; x < 2*nb_freq; x++) {
925                     double w= (x-nb_freq)*(1.0/nb_freq);
926                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
927                     i += channels;
928                     if (i >= SAMPLE_ARRAY_SIZE)
929                         i -= SAMPLE_ARRAY_SIZE;
930                 }
931                 av_rdft_calc(s->rdft, data[ch]);
932             }
933             //least efficient way to do this; we should of course access the data directly, but it's more than fast enough
934             for(y=0; y<s->height; y++){
935                 double w= 1/sqrt(nb_freq);
936                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
937                 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
938                 a= FFMIN(a,255);
939                 b= FFMIN(b,255);
940                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
941
942                 fill_rectangle(screen,
943                             s->xpos, s->height-y, 1, 1,
944                             fgcolor);
945             }
946         }
947         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
948         s->xpos++;
949         if(s->xpos >= s->width)
950             s->xpos= s->xleft;
951     }
952 }
953
954 static int video_open(VideoState *is){
955     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
956     int w,h;
957
958     if(is_full_screen) flags |= SDL_FULLSCREEN;
959     else               flags |= SDL_RESIZABLE;
960
961     if (is_full_screen && fs_screen_width) {
962         w = fs_screen_width;
963         h = fs_screen_height;
964     } else if(!is_full_screen && screen_width){
965         w = screen_width;
966         h = screen_height;
967 #if CONFIG_AVFILTER
968     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
969         w = is->out_video_filter->inputs[0]->w;
970         h = is->out_video_filter->inputs[0]->h;
971 #else
972     }else if (is->video_st && is->video_st->codec->width){
973         w = is->video_st->codec->width;
974         h = is->video_st->codec->height;
975 #endif
976     } else {
977         w = 640;
978         h = 480;
979     }
980     if(screen && is->width == screen->w && screen->w == w
981        && is->height== screen->h && screen->h == h)
982         return 0;
983
984 #ifndef __APPLE__
985     screen = SDL_SetVideoMode(w, h, 0, flags);
986 #else
987     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
988     screen = SDL_SetVideoMode(w, h, 24, flags);
989 #endif
990     if (!screen) {
991         fprintf(stderr, "SDL: could not set video mode - exiting\n");
992         return -1;
993     }
994     SDL_WM_SetCaption("FFplay", "FFplay");
995
996     is->width = screen->w;
997     is->height = screen->h;
998
999     return 0;
1000 }
1001
1002 /* display the current picture, if any */
1003 static void video_display(VideoState *is)
1004 {
1005     if(!screen)
1006         video_open(cur_stream);
1007     if (is->audio_st && is->show_audio)
1008         video_audio_display(is);
1009     else if (is->video_st)
1010         video_image_display(is);
1011 }
1012
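/* background thread pushing FF_REFRESH_EVENTs to drive video_refresh_timer();
   is->refresh ensures at most one refresh event is queued at any time */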
1013 static int refresh_thread(void *opaque)
1014 {
1015     VideoState *is= opaque;
1016     while(!is->abort_request){
1017         SDL_Event event;
1018         event.type = FF_REFRESH_EVENT;
1019         event.user.data1 = opaque;
1020         if(!is->refresh){
1021             is->refresh=1;
1022             SDL_PushEvent(&event);
1023         }
1024         usleep(5000); //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
1025     }
1026     return 0;
1027 }
1028
1029 /* get the current audio clock value */
1030 static double get_audio_clock(VideoState *is)
1031 {
1032     double pts;
1033     int hw_buf_size, bytes_per_sec;
1034     pts = is->audio_clock;
1035     hw_buf_size = audio_write_get_buf_size(is);
1036     bytes_per_sec = 0;
1037     if (is->audio_st) {
1038         bytes_per_sec = is->audio_st->codec->sample_rate *
1039             2 * is->audio_st->codec->channels;
1040     }
1041     if (bytes_per_sec)
1042         pts -= (double)hw_buf_size / bytes_per_sec;
1043     return pts;
1044 }
1045
1046 /* get the current video clock value */
1047 static double get_video_clock(VideoState *is)
1048 {
1049     if (is->paused) {
1050         return is->video_current_pts;
1051     } else {
1052         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1053     }
1054 }
1055
1056 /* get the current external clock value */
1057 static double get_external_clock(VideoState *is)
1058 {
1059     int64_t ti;
1060     ti = av_gettime();
1061     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1062 }
1063
1064 /* get the current master clock value */
1065 static double get_master_clock(VideoState *is)
1066 {
1067     double val;
1068
1069     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1070         if (is->video_st)
1071             val = get_video_clock(is);
1072         else
1073             val = get_audio_clock(is);
1074     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1075         if (is->audio_st)
1076             val = get_audio_clock(is);
1077         else
1078             val = get_video_clock(is);
1079     } else {
1080         val = get_external_clock(is);
1081     }
1082     return val;
1083 }
1084
1085 /* seek in the stream */
1086 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1087 {
1088     if (!is->seek_req) {
1089         is->seek_pos = pos;
1090         is->seek_rel = rel;
1091         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1092         if (seek_by_bytes)
1093             is->seek_flags |= AVSEEK_FLAG_BYTE;
1094         is->seek_req = 1;
1095     }
1096 }
1097
1098 /* pause or resume the video */
1099 static void stream_pause(VideoState *is)
1100 {
1101     if (is->paused) {
1102         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1103         if(is->read_pause_return != AVERROR(ENOSYS)){
1104             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1105         }
1106         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1107     }
1108     is->paused = !is->paused;
1109 }
1110
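/* compute the absolute time (on the is->frame_timer timeline, in seconds) at
   which the given frame should be displayed; the result is stored as
   vp->target_clock and compared against av_gettime() by video_refresh_timer()
   to decide whether a frame is late and may be dropped */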
1111 static double compute_target_time(double frame_current_pts, VideoState *is)
1112 {
1113     double delay, sync_threshold, diff = 0;
1114
1115     /* compute nominal delay */
1116     delay = frame_current_pts - is->frame_last_pts;
1117     if (delay <= 0 || delay >= 10.0) {
1118         /* if incorrect delay, use previous one */
1119         delay = is->frame_last_delay;
1120     } else {
1121         is->frame_last_delay = delay;
1122     }
1123     is->frame_last_pts = frame_current_pts;
1124
1125     /* update delay to follow master synchronisation source */
1126     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1127          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1128         /* if video is slave, we try to correct big delays by
1129            duplicating or deleting a frame */
1130         diff = get_video_clock(is) - get_master_clock(is);
1131
1132         /* skip or repeat frame. We take into account the
1133            delay to compute the threshold. I still don't know
1134            if it is the best guess */
1135         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1136         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1137             if (diff <= -sync_threshold)
1138                 delay = 0;
1139             else if (diff >= sync_threshold)
1140                 delay = 2 * delay;
1141         }
1142     }
1143     is->frame_timer += delay;
1144 #if defined(DEBUG_SYNC)
1145     printf("video: delay=%0.3f pts=%0.3f A-V=%f\n",
1146             delay, frame_current_pts, -diff);
1147 #endif
1148
1149     return is->frame_timer;
1150 }
1151
1152 /* called to display each frame */
1153 static void video_refresh_timer(void *opaque)
1154 {
1155     VideoState *is = opaque;
1156     VideoPicture *vp;
1157
1158     SubPicture *sp, *sp2;
1159
1160     if (is->video_st) {
1161 retry:
1162         if (is->pictq_size == 0) {
1163             //nothing to do, no picture to display in the queue
1164         } else {
1165             double time= av_gettime()/1000000.0;
1166             double next_target;
1167             /* dequeue the picture */
1168             vp = &is->pictq[is->pictq_rindex];
1169
1170             if(time < vp->target_clock)
1171                 return;
1172             /* update current video pts */
1173             is->video_current_pts = vp->pts;
1174             is->video_current_pts_drift = is->video_current_pts - time;
1175             is->video_current_pos = vp->pos;
1176             if(is->pictq_size > 1){
1177                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1178                 assert(nextvp->target_clock >= vp->target_clock);
1179                 next_target= nextvp->target_clock;
1180             }else{
1181                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1182             }
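            /* frame drop: if we are already past the next frame's target time,
               grow the decoder skip ratio and, if another picture is queued
               (or we are more than 0.5s late), discard this picture undisplayed */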
1183             if(framedrop && time > next_target){
1184                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1185                 if(is->pictq_size > 1 || time > next_target + 0.5){
1186                     /* update queue size and signal for next picture */
1187                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1188                         is->pictq_rindex = 0;
1189
1190                     SDL_LockMutex(is->pictq_mutex);
1191                     is->pictq_size--;
1192                     SDL_CondSignal(is->pictq_cond);
1193                     SDL_UnlockMutex(is->pictq_mutex);
1194                     goto retry;
1195                 }
1196             }
1197
1198             if(is->subtitle_st) {
1199                 if (is->subtitle_stream_changed) {
1200                     SDL_LockMutex(is->subpq_mutex);
1201
1202                     while (is->subpq_size) {
1203                         free_subpicture(&is->subpq[is->subpq_rindex]);
1204
1205                         /* update queue size and signal for next picture */
1206                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1207                             is->subpq_rindex = 0;
1208
1209                         is->subpq_size--;
1210                     }
1211                     is->subtitle_stream_changed = 0;
1212
1213                     SDL_CondSignal(is->subpq_cond);
1214                     SDL_UnlockMutex(is->subpq_mutex);
1215                 } else {
1216                     if (is->subpq_size > 0) {
1217                         sp = &is->subpq[is->subpq_rindex];
1218
1219                         if (is->subpq_size > 1)
1220                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1221                         else
1222                             sp2 = NULL;
1223
1224                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1225                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1226                         {
1227                             free_subpicture(sp);
1228
1229                             /* update queue size and signal for next picture */
1230                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1231                                 is->subpq_rindex = 0;
1232
1233                             SDL_LockMutex(is->subpq_mutex);
1234                             is->subpq_size--;
1235                             SDL_CondSignal(is->subpq_cond);
1236                             SDL_UnlockMutex(is->subpq_mutex);
1237                         }
1238                     }
1239                 }
1240             }
1241
1242             /* display picture */
1243             video_display(is);
1244
1245             /* update queue size and signal for next picture */
1246             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1247                 is->pictq_rindex = 0;
1248
1249             SDL_LockMutex(is->pictq_mutex);
1250             is->pictq_size--;
1251             SDL_CondSignal(is->pictq_cond);
1252             SDL_UnlockMutex(is->pictq_mutex);
1253         }
1254     } else if (is->audio_st) {
1255         /* draw the next audio frame */
1256
1257         /* if only audio stream, then display the audio bars (better
1258            than nothing, just to test the implementation) */
1259
1260         /* display picture */
1261         video_display(is);
1262     }
1263     if (show_status) {
1264         static int64_t last_time;
1265         int64_t cur_time;
1266         int aqsize, vqsize, sqsize;
1267         double av_diff;
1268
1269         cur_time = av_gettime();
1270         if (!last_time || (cur_time - last_time) >= 30000) {
1271             aqsize = 0;
1272             vqsize = 0;
1273             sqsize = 0;
1274             if (is->audio_st)
1275                 aqsize = is->audioq.size;
1276             if (is->video_st)
1277                 vqsize = is->videoq.size;
1278             if (is->subtitle_st)
1279                 sqsize = is->subtitleq.size;
1280             av_diff = 0;
1281             if (is->audio_st && is->video_st)
1282                 av_diff = get_audio_clock(is) - get_video_clock(is);
1283             printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1284                    get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
1285             fflush(stdout);
1286             last_time = cur_time;
1287         }
1288     }
1289 }
1290
1291 /* allocate a picture (this needs to be done in the main thread to avoid
1292    potential locking problems) */
1293 static void alloc_picture(void *opaque)
1294 {
1295     VideoState *is = opaque;
1296     VideoPicture *vp;
1297
1298     vp = &is->pictq[is->pictq_windex];
1299
1300     if (vp->bmp)
1301         SDL_FreeYUVOverlay(vp->bmp);
1302
1303 #if CONFIG_AVFILTER
1304     if (vp->picref)
1305         avfilter_unref_pic(vp->picref);
1306     vp->picref = NULL;
1307
1308     vp->width   = is->out_video_filter->inputs[0]->w;
1309     vp->height  = is->out_video_filter->inputs[0]->h;
1310     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1311 #else
1312     vp->width   = is->video_st->codec->width;
1313     vp->height  = is->video_st->codec->height;
1314     vp->pix_fmt = is->video_st->codec->pix_fmt;
1315 #endif
1316
1317     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1318                                    SDL_YV12_OVERLAY,
1319                                    screen);
1320
1321     SDL_LockMutex(is->pictq_mutex);
1322     vp->allocated = 1;
1323     SDL_CondSignal(is->pictq_cond);
1324     SDL_UnlockMutex(is->pictq_mutex);
1325 }
1326
1327 /**
1328  *
1329  * @param pts the dts of the pkt / pts of the frame, guessed if not known
1330  */
1331 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1332 {
1333     VideoPicture *vp;
1334     int dst_pix_fmt;
1335 #if CONFIG_AVFILTER
1336     AVPicture pict_src;
1337 #endif
1338     /* wait until we have space to put a new picture */
1339     SDL_LockMutex(is->pictq_mutex);
1340
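    /* the picture queue is full and no refresh is pending: the display side is
       keeping up, so let the frame-skip ratio decay (never below 1 - FRAME_SKIP_FACTOR) */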
1341     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1342         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1343
1344     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1345            !is->videoq.abort_request) {
1346         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1347     }
1348     SDL_UnlockMutex(is->pictq_mutex);
1349
1350     if (is->videoq.abort_request)
1351         return -1;
1352
1353     vp = &is->pictq[is->pictq_windex];
1354
1355     /* alloc or resize hardware picture buffer */
1356     if (!vp->bmp ||
1357 #if CONFIG_AVFILTER
1358         vp->width  != is->out_video_filter->inputs[0]->w ||
1359         vp->height != is->out_video_filter->inputs[0]->h) {
1360 #else
1361         vp->width != is->video_st->codec->width ||
1362         vp->height != is->video_st->codec->height) {
1363 #endif
1364         SDL_Event event;
1365
1366         vp->allocated = 0;
1367
1368         /* the allocation must be done in the main thread to avoid
1369            locking problems */
1370         event.type = FF_ALLOC_EVENT;
1371         event.user.data1 = is;
1372         SDL_PushEvent(&event);
1373
1374         /* wait until the picture is allocated */
1375         SDL_LockMutex(is->pictq_mutex);
1376         while (!vp->allocated && !is->videoq.abort_request) {
1377             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1378         }
1379         SDL_UnlockMutex(is->pictq_mutex);
1380
1381         if (is->videoq.abort_request)
1382             return -1;
1383     }
1384
1385     /* if the frame is not skipped, then display it */
1386     if (vp->bmp) {
1387         AVPicture pict;
1388 #if CONFIG_AVFILTER
1389         if(vp->picref)
1390             avfilter_unref_pic(vp->picref);
1391         vp->picref = src_frame->opaque;
1392 #endif
1393
1394         /* get a pointer on the bitmap */
1395         SDL_LockYUVOverlay (vp->bmp);
1396
1397         dst_pix_fmt = PIX_FMT_YUV420P;
1398         memset(&pict,0,sizeof(AVPicture));
1399         pict.data[0] = vp->bmp->pixels[0];
1400         pict.data[1] = vp->bmp->pixels[2];
1401         pict.data[2] = vp->bmp->pixels[1];
1402
1403         pict.linesize[0] = vp->bmp->pitches[0];
1404         pict.linesize[1] = vp->bmp->pitches[2];
1405         pict.linesize[2] = vp->bmp->pitches[1];
1406
1407 #if CONFIG_AVFILTER
1408         pict_src.data[0] = src_frame->data[0];
1409         pict_src.data[1] = src_frame->data[1];
1410         pict_src.data[2] = src_frame->data[2];
1411
1412         pict_src.linesize[0] = src_frame->linesize[0];
1413         pict_src.linesize[1] = src_frame->linesize[1];
1414         pict_src.linesize[2] = src_frame->linesize[2];
1415
1416         //FIXME use direct rendering
1417         av_picture_copy(&pict, &pict_src,
1418                         vp->pix_fmt, vp->width, vp->height);
1419 #else
1420         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1421         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1422             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1423             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1424         if (is->img_convert_ctx == NULL) {
1425             fprintf(stderr, "Cannot initialize the conversion context\n");
1426             exit(1);
1427         }
1428         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1429                   0, vp->height, pict.data, pict.linesize);
1430 #endif
1431         /* update the bitmap content */
1432         SDL_UnlockYUVOverlay(vp->bmp);
1433
1434         vp->pts = pts;
1435         vp->pos = pos;
1436
1437         /* now we can update the picture count */
1438         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1439             is->pictq_windex = 0;
1440         SDL_LockMutex(is->pictq_mutex);
1441         vp->target_clock= compute_target_time(vp->pts, is);
1442
1443         is->pictq_size++;
1444         SDL_UnlockMutex(is->pictq_mutex);
1445     }
1446     return 0;
1447 }
1448
1449 /**
1450  * compute the exact PTS for the picture if it is omitted in the stream
1451  * @param pts1 the dts of the pkt / pts of the frame
1452  */
1453 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1454 {
1455     double frame_delay, pts;
1456
1457     pts = pts1;
1458
1459     if (pts != 0) {
1460         /* update video clock with pts, if present */
1461         is->video_clock = pts;
1462     } else {
1463         pts = is->video_clock;
1464     }
1465     /* update video clock for next frame */
1466     frame_delay = av_q2d(is->video_st->codec->time_base);
1467     /* for MPEG2, the frame can be repeated, so we update the
1468        clock accordingly */
1469     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1470     is->video_clock += frame_delay;
1471
1472 #if defined(DEBUG_SYNC) && 0
1473     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1474            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1475 #endif
1476     return queue_picture(is, src_frame, pts, pos);
1477 }
1478
1479 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1480 {
1481     int len1, got_picture, i;
1482
1483         if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1484             return -1;
1485
1486         if(pkt->data == flush_pkt.data){
1487             avcodec_flush_buffers(is->video_st->codec);
1488
1489             SDL_LockMutex(is->pictq_mutex);
1490             //Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1491             for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1492                 is->pictq[i].target_clock= 0;
1493             }
1494             while (is->pictq_size && !is->videoq.abort_request) {
1495                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1496             }
1497             is->video_current_pos= -1;
1498             SDL_UnlockMutex(is->pictq_mutex);
1499
1500             is->last_dts_for_fault_detection=
1501             is->last_pts_for_fault_detection= INT64_MIN;
1502             is->frame_last_pts= AV_NOPTS_VALUE;
1503             is->frame_last_delay = 0;
1504             is->frame_timer = (double)av_gettime() / 1000000.0;
1505             is->skip_frames= 1;
1506             is->skip_frames_index= 0;
1507             return 0;
1508         }
1509
1510         /* NOTE: ipts is the PTS of the _first_ picture beginning in
1511            this packet, if any */
1512         is->video_st->codec->reordered_opaque= pkt->pts;
1513         len1 = avcodec_decode_video2(is->video_st->codec,
1514                                     frame, &got_picture,
1515                                     pkt);
1516
1517         if (got_picture) {
1518             if(pkt->dts != AV_NOPTS_VALUE){
1519                 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
1520                 is->last_dts_for_fault_detection= pkt->dts;
1521             }
1522             if(frame->reordered_opaque != AV_NOPTS_VALUE){
1523                 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
1524                 is->last_pts_for_fault_detection= frame->reordered_opaque;
1525             }
1526         }
1527
1528         if(   (   decoder_reorder_pts==1
1529                || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
1530                || pkt->dts == AV_NOPTS_VALUE)
1531            && frame->reordered_opaque != AV_NOPTS_VALUE)
1532             *pts= frame->reordered_opaque;
1533         else if(pkt->dts != AV_NOPTS_VALUE)
1534             *pts= pkt->dts;
1535         else
1536             *pts= 0;
1537
1538 //            if (len1 < 0)
1539 //                break;
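    /* decoder-side frame dropping: only pass roughly one out of every
       skip_frames decoded pictures on to the display queue; the others are
       decoded (to keep the codec state consistent) but never queued */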
1540     if (got_picture){
1541         is->skip_frames_index += 1;
1542         if(is->skip_frames_index >= is->skip_frames){
1543             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1544             return 1;
1545         }
1546
1547     }
1548     return 0;
1549 }
1550
1551 #if CONFIG_AVFILTER
1552 typedef struct {
1553     VideoState *is;
1554     AVFrame *frame;
1555 } FilterPriv;
1556
1557 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1558 {
1559     FilterPriv *priv = ctx->priv;
1560     if(!opaque) return -1;
1561
1562     priv->is = opaque;
1563     priv->frame = avcodec_alloc_frame();
1564
1565     return 0;
1566 }
1567
1568 static void input_uninit(AVFilterContext *ctx)
1569 {
1570     FilterPriv *priv = ctx->priv;
1571     av_free(priv->frame);
1572 }
1573
1574 static int input_request_frame(AVFilterLink *link)
1575 {
1576     FilterPriv *priv = link->src->priv;
1577     AVFilterPicRef *picref;
1578     int64_t pts = 0;
1579     AVPacket pkt;
1580     int ret;
1581
1582     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1583         av_free_packet(&pkt);
1584     if (ret < 0)
1585         return -1;
1586
1587     /* FIXME: until I figure out how to hook everything up to the codec
1588      * right, we're just copying the entire frame. */
1589     picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1590     av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
1591                     picref->pic->format, link->w, link->h);
1592     av_free_packet(&pkt);
1593
1594     picref->pts = pts;
1595     picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1596     avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
1597     avfilter_draw_slice(link, 0, link->h, 1);
1598     avfilter_end_frame(link);
1599     avfilter_unref_pic(picref);
1600
1601     return 0;
1602 }
1603
1604 static int input_query_formats(AVFilterContext *ctx)
1605 {
1606     FilterPriv *priv = ctx->priv;
1607     enum PixelFormat pix_fmts[] = {
1608         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1609     };
1610
1611     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1612     return 0;
1613 }
1614
1615 static int input_config_props(AVFilterLink *link)
1616 {
1617     FilterPriv *priv  = link->src->priv;
1618     AVCodecContext *c = priv->is->video_st->codec;
1619
1620     link->w = c->width;
1621     link->h = c->height;
1622
1623     return 0;
1624 }
1625
1626 static AVFilter input_filter =
1627 {
1628     .name      = "ffplay_input",
1629
1630     .priv_size = sizeof(FilterPriv),
1631
1632     .init      = input_init,
1633     .uninit    = input_uninit,
1634
1635     .query_formats = input_query_formats,
1636
1637     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1638     .outputs   = (AVFilterPad[]) {{ .name = "default",
1639                                     .type = CODEC_TYPE_VIDEO,
1640                                     .request_frame = input_request_frame,
1641                                     .config_props  = input_config_props, },
1642                                   { .name = NULL }},
1643 };
1644
1645 static void output_end_frame(AVFilterLink *link)
1646 {
1647 }
1648
1649 static int output_query_formats(AVFilterContext *ctx)
1650 {
1651     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1652
1653     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1654     return 0;
1655 }
1656
1657 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1658                                     int64_t *pts)
1659 {
1660     AVFilterPicRef *pic;
1661
1662     if(avfilter_request_frame(ctx->inputs[0]))
1663         return -1;
1664     if(!(pic = ctx->inputs[0]->cur_pic))
1665         return -1;
1666     ctx->inputs[0]->cur_pic = NULL;
1667
1668     frame->opaque = pic;
1669     *pts          = pic->pts;
1670
1671     memcpy(frame->data,     pic->data,     sizeof(frame->data));
1672     memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1673
1674     return 1;
1675 }
1676
1677 static AVFilter output_filter =
1678 {
1679     .name      = "ffplay_output",
1680
1681     .query_formats = output_query_formats,
1682
1683     .inputs    = (AVFilterPad[]) {{ .name          = "default",
1684                                     .type          = CODEC_TYPE_VIDEO,
1685                                     .end_frame     = output_end_frame,
1686                                     .min_perms     = AV_PERM_READ, },
1687                                   { .name = NULL }},
1688     .outputs   = (AVFilterPad[]) {{ .name = NULL }},
1689 };
1690 #endif  /* CONFIG_AVFILTER */
1691
1692 static int video_thread(void *arg)
1693 {
1694     VideoState *is = arg;
1695     AVFrame *frame= avcodec_alloc_frame();
1696     int64_t pts_int;
1697     double pts;
1698     int ret;
1699
1700 #if CONFIG_AVFILTER
1701     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1702     AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
1703     graph->scale_sws_opts = av_strdup("sws_flags=bilinear");
1704
1705     if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
1706     if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;
1707
1708     if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
1709     if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;
1710
1711
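         /* wire the graph: source -> [user-specified -vf chain, if any] -> sink;
            without the -vfilters option the source is linked directly to the
            sink */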
1712     if(vfilters) {
1713         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1714         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1715
1716         outputs->name    = av_strdup("in");
1717         outputs->filter  = filt_src;
1718         outputs->pad_idx = 0;
1719         outputs->next    = NULL;
1720
1721         inputs->name    = av_strdup("out");
1722         inputs->filter  = filt_out;
1723         inputs->pad_idx = 0;
1724         inputs->next    = NULL;
1725
1726         if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
1727             goto the_end;
1728         av_freep(&vfilters);
1729     } else {
1730         if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
1731     }
1732     avfilter_graph_add_filter(graph, filt_src);
1733     avfilter_graph_add_filter(graph, filt_out);
1734
1735     if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
1736     if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
1737     if(avfilter_graph_config_links(graph, NULL))             goto the_end;
1738
1739     is->out_video_filter = filt_out;
1740 #endif
1741
1742     for(;;) {
1743 #if !CONFIG_AVFILTER
1744         AVPacket pkt;
1745 #endif
1746         while (is->paused && !is->videoq.abort_request)
1747             SDL_Delay(10);
1748 #if CONFIG_AVFILTER
1749         ret = get_filtered_video_frame(filt_out, frame, &pts_int);
1750 #else
1751         ret = get_video_frame(is, frame, &pts_int, &pkt);
1752 #endif
1753
1754         if (ret < 0) goto the_end;
1755
1756         if (!ret)
1757             continue;
1758
1759         pts = pts_int*av_q2d(is->video_st->time_base);
1760
1761 #if CONFIG_AVFILTER
1762         ret = output_picture2(is, frame, pts,  -1); /* fixme: unknown pos */
1763 #else
1764         ret = output_picture2(is, frame, pts,  pkt.pos);
1765         av_free_packet(&pkt);
1766 #endif
1767         if (ret < 0)
1768             goto the_end;
1769
1770         if (step)
1771             if (cur_stream)
1772                 stream_pause(cur_stream);
1773     }
1774  the_end:
1775 #if CONFIG_AVFILTER
1776     avfilter_graph_destroy(graph);
1777     av_freep(&graph);
1778 #endif
1779     av_free(frame);
1780     return 0;
1781 }
1782
1783 static int subtitle_thread(void *arg)
1784 {
1785     VideoState *is = arg;
1786     SubPicture *sp;
1787     AVPacket pkt1, *pkt = &pkt1;
1788     int len1, got_subtitle;
1789     double pts;
1790     int i, j;
1791     int r, g, b, y, u, v, a;
1792
1793     for(;;) {
1794         while (is->paused && !is->subtitleq.abort_request) {
1795             SDL_Delay(10);
1796         }
1797         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1798             break;
1799
1800         if(pkt->data == flush_pkt.data){
1801             avcodec_flush_buffers(is->subtitle_st->codec);
1802             continue;
1803         }
1804         SDL_LockMutex(is->subpq_mutex);
1805         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1806                !is->subtitleq.abort_request) {
1807             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1808         }
1809         SDL_UnlockMutex(is->subpq_mutex);
1810
1811         if (is->subtitleq.abort_request)
1812             goto the_end;
1813
1814         sp = &is->subpq[is->subpq_windex];
1815
1816         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1817            this packet, if any */
1818         pts = 0;
1819         if (pkt->pts != AV_NOPTS_VALUE)
1820             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1821
1822         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1823                                     &sp->sub, &got_subtitle,
1824                                     pkt);
1825 //            if (len1 < 0)
1826 //                break;
1827         if (got_subtitle && sp->sub.format == 0) {
1828             sp->pts = pts;
1829
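                 /* convert each palette entry of every subtitle rectangle from
                    RGBA to YUVA (CCIR range) so it can be blended onto the YUV
                    overlay */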
1830             for (i = 0; i < sp->sub.num_rects; i++)
1831             {
1832                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1833                 {
1834                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1835                     y = RGB_TO_Y_CCIR(r, g, b);
1836                     u = RGB_TO_U_CCIR(r, g, b, 0);
1837                     v = RGB_TO_V_CCIR(r, g, b, 0);
1838                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1839                 }
1840             }
1841
1842             /* now we can update the picture count */
1843             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1844                 is->subpq_windex = 0;
1845             SDL_LockMutex(is->subpq_mutex);
1846             is->subpq_size++;
1847             SDL_UnlockMutex(is->subpq_mutex);
1848         }
1849         av_free_packet(pkt);
1850 //        if (step)
1851 //            if (cur_stream)
1852 //                stream_pause(cur_stream);
1853     }
1854  the_end:
1855     return 0;
1856 }
1857
1858 /* copy samples for viewing in the audio display window */
1859 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1860 {
1861     int size, len, channels;
1862
1863     channels = is->audio_st->codec->channels;
1864
1865     size = samples_size / sizeof(short);
1866     while (size > 0) {
1867         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1868         if (len > size)
1869             len = size;
1870         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1871         samples += len;
1872         is->sample_array_index += len;
1873         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1874             is->sample_array_index = 0;
1875         size -= len;
1876     }
1877 }
1878
1879 /* return the new audio buffer size (samples can be added or removed
1880    to get better sync when the video or an external clock is the master) */
1881 static int synchronize_audio(VideoState *is, short *samples,
1882                              int samples_size1, double pts)
1883 {
1884     int n, samples_size;
1885     double ref_clock;
1886
1887     n = 2 * is->audio_st->codec->channels;
1888     samples_size = samples_size1;
1889
1890     /* if not master, then we try to remove or add samples to correct the clock */
1891     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1892          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1893         double diff, avg_diff;
1894         int wanted_size, min_size, max_size, nb_samples;
1895
1896         ref_clock = get_master_clock(is);
1897         diff = get_audio_clock(is) - ref_clock;
1898
1899         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1900             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1901             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1902                 /* not enough measures to have a correct estimate */
1903                 is->audio_diff_avg_count++;
1904             } else {
1905                 /* estimate the A-V difference */
1906                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1907
1908                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
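                         /* a drift of diff seconds corresponds to
                            diff * sample_rate samples, i.e. diff * sample_rate * n
                            bytes; the result is then clamped to within
                            SAMPLE_CORRECTION_PERCENT_MAX percent of the buffer */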
1909                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1910                     nb_samples = samples_size / n;
1911
1912                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1913                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1914                     if (wanted_size < min_size)
1915                         wanted_size = min_size;
1916                     else if (wanted_size > max_size)
1917                         wanted_size = max_size;
1918
1919                     /* add or remove samples to correct the sync */
1920                     if (wanted_size < samples_size) {
1921                         /* remove samples */
1922                         samples_size = wanted_size;
1923                     } else if (wanted_size > samples_size) {
1924                         uint8_t *samples_end, *q;
1925                         int nb;
1926
1927                         /* add samples */
1928                         nb = (wanted_size - samples_size);
1929                         samples_end = (uint8_t *)samples + samples_size - n;
1930                         q = samples_end + n;
1931                         while (nb > 0) {
1932                             memcpy(q, samples_end, n);
1933                             q += n;
1934                             nb -= n;
1935                         }
1936                         samples_size = wanted_size;
1937                     }
1938                 }
1939 #if 0
1940                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1941                        diff, avg_diff, samples_size - samples_size1,
1942                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1943 #endif
1944             }
1945         } else {
1946             /* the difference is too big: it is probably caused by initial
1947                PTS errors, so reset the A-V filter */
1948             is->audio_diff_avg_count = 0;
1949             is->audio_diff_cum = 0;
1950         }
1951     }
1952
1953     return samples_size;
1954 }
1955
1956 /* decode one audio frame and return its uncompressed size */
1957 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1958 {
1959     AVPacket *pkt_temp = &is->audio_pkt_temp;
1960     AVPacket *pkt = &is->audio_pkt;
1961     AVCodecContext *dec= is->audio_st->codec;
1962     int n, len1, data_size;
1963     double pts;
1964
1965     for(;;) {
1966         /* NOTE: the audio packet can contain several frames */
1967         while (pkt_temp->size > 0) {
1968             data_size = sizeof(is->audio_buf1);
1969             len1 = avcodec_decode_audio3(dec,
1970                                         (int16_t *)is->audio_buf1, &data_size,
1971                                         pkt_temp);
1972             if (len1 < 0) {
1973                 /* if error, we skip the frame */
1974                 pkt_temp->size = 0;
1975                 break;
1976             }
1977
1978             pkt_temp->data += len1;
1979             pkt_temp->size -= len1;
1980             if (data_size <= 0)
1981                 continue;
1982
1983             if (dec->sample_fmt != is->audio_src_fmt) {
1984                 if (is->reformat_ctx)
1985                     av_audio_convert_free(is->reformat_ctx);
1986                 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1987                                                          dec->sample_fmt, 1, NULL, 0);
1988                 if (!is->reformat_ctx) {
1989                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1990                         avcodec_get_sample_fmt_name(dec->sample_fmt),
1991                         avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
1992                         break;
1993                 }
1994                 is->audio_src_fmt= dec->sample_fmt;
1995             }
1996
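                 /* convert the decoded samples to interleaved signed 16-bit;
                    the strides are in bytes per sample (the input stride
                    depends on the decoder sample format, the output is always
                    2 bytes for S16) */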
1997             if (is->reformat_ctx) {
1998                 const void *ibuf[6]= {is->audio_buf1};
1999                 void *obuf[6]= {is->audio_buf2};
2000                 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
2001                 int ostride[6]= {2};
2002                 int len= data_size/istride[0];
2003                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2004                     printf("av_audio_convert() failed\n");
2005                     break;
2006                 }
2007                 is->audio_buf= is->audio_buf2;
2008                 /* FIXME: the existing code assumes that data_size equals framesize*channels*2;
2009                           remove this legacy cruft */
2010                 data_size= len*2;
2011             }else{
2012                 is->audio_buf= is->audio_buf1;
2013             }
2014
2015             /* report the running audio clock as the pts of this frame */
2016             pts = is->audio_clock;
2017             *pts_ptr = pts;
2018             n = 2 * dec->channels;
2019             is->audio_clock += (double)data_size /
2020                 (double)(n * dec->sample_rate);
2021 #if defined(DEBUG_SYNC)
2022             {
2023                 static double last_clock;
2024                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2025                        is->audio_clock - last_clock,
2026                        is->audio_clock, pts);
2027                 last_clock = is->audio_clock;
2028             }
2029 #endif
2030             return data_size;
2031         }
2032
2033         /* free the current packet */
2034         if (pkt->data)
2035             av_free_packet(pkt);
2036
2037         if (is->paused || is->audioq.abort_request) {
2038             return -1;
2039         }
2040
2041         /* read next packet */
2042         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2043             return -1;
2044         if(pkt->data == flush_pkt.data){
2045             avcodec_flush_buffers(dec);
2046             continue;
2047         }
2048
2049         pkt_temp->data = pkt->data;
2050         pkt_temp->size = pkt->size;
2051
2052         /* if the packet has a pts, update the audio clock with it */
2053         if (pkt->pts != AV_NOPTS_VALUE) {
2054             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2055         }
2056     }
2057 }
2058
2059 /* get the amount of data left in the current audio output buffer, in bytes.
2060    With SDL, we cannot have precise hardware buffer fullness information */
2061 static int audio_write_get_buf_size(VideoState *is)
2062 {
2063     return is->audio_buf_size - is->audio_buf_index;
2064 }
2065
2066
2067 /* prepare a new audio buffer */
2068 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2069 {
2070     VideoState *is = opaque;
2071     int audio_size, len1;
2072     double pts;
2073
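     /* record the time at which SDL requested data; it is used elsewhere to
        estimate how much of the audio buffer has already been played */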
2074     audio_callback_time = av_gettime();
2075
2076     while (len > 0) {
2077         if (is->audio_buf_index >= is->audio_buf_size) {
2078            audio_size = audio_decode_frame(is, &pts);
2079            if (audio_size < 0) {
2080                 /* if error, just output silence */
2081                is->audio_buf = is->audio_buf1;
2082                is->audio_buf_size = 1024;
2083                memset(is->audio_buf, 0, is->audio_buf_size);
2084            } else {
2085                if (is->show_audio)
2086                    update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2087                audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2088                                               pts);
2089                is->audio_buf_size = audio_size;
2090            }
2091            is->audio_buf_index = 0;
2092         }
2093         len1 = is->audio_buf_size - is->audio_buf_index;
2094         if (len1 > len)
2095             len1 = len;
2096         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2097         len -= len1;
2098         stream += len1;
2099         is->audio_buf_index += len1;
2100     }
2101 }
2102
2103 /* open a given stream. Return 0 if OK */
2104 static int stream_component_open(VideoState *is, int stream_index)
2105 {
2106     AVFormatContext *ic = is->ic;
2107     AVCodecContext *avctx;
2108     AVCodec *codec;
2109     SDL_AudioSpec wanted_spec, spec;
2110
2111     if (stream_index < 0 || stream_index >= ic->nb_streams)
2112         return -1;
2113     avctx = ic->streams[stream_index]->codec;
2114
2115     /* request at most 2 decoded audio channels (downmix to stereo) */
2116     if (avctx->codec_type == CODEC_TYPE_AUDIO) {
2117         if (avctx->channels > 0) {
2118             avctx->request_channels = FFMIN(2, avctx->channels);
2119         } else {
2120             avctx->request_channels = 2;
2121         }
2122     }
2123
2124     codec = avcodec_find_decoder(avctx->codec_id);
2125     avctx->debug_mv = debug_mv;
2126     avctx->debug = debug;
2127     avctx->workaround_bugs = workaround_bugs;
2128     avctx->lowres = lowres;
2129     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2130     avctx->idct_algo= idct;
2131     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2132     avctx->skip_frame= skip_frame;
2133     avctx->skip_idct= skip_idct;
2134     avctx->skip_loop_filter= skip_loop_filter;
2135     avctx->error_recognition= error_recognition;
2136     avctx->error_concealment= error_concealment;
2137     avcodec_thread_init(avctx, thread_count);
2138
2139     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
2140
2141     if (!codec ||
2142         avcodec_open(avctx, codec) < 0)
2143         return -1;
2144
2145     /* prepare audio output */
2146     if (avctx->codec_type == CODEC_TYPE_AUDIO) {
2147         wanted_spec.freq = avctx->sample_rate;
2148         wanted_spec.format = AUDIO_S16SYS;
2149         wanted_spec.channels = avctx->channels;
2150         wanted_spec.silence = 0;
2151         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2152         wanted_spec.callback = sdl_audio_callback;
2153         wanted_spec.userdata = is;
2154         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2155             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2156             return -1;
2157         }
2158         is->audio_hw_buf_size = spec.size;
2159         is->audio_src_fmt= SAMPLE_FMT_S16;
2160     }
2161
2162     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2163     switch(avctx->codec_type) {
2164     case CODEC_TYPE_AUDIO:
2165         is->audio_stream = stream_index;
2166         is->audio_st = ic->streams[stream_index];
2167         is->audio_buf_size = 0;
2168         is->audio_buf_index = 0;
2169
2170         /* init averaging filter */
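             /* the coefficient is chosen so that A-V difference measurements
                older than AUDIO_DIFF_AVG_NB iterations contribute only about
                1% to the weighted average used in synchronize_audio() */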
2171         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2172         is->audio_diff_avg_count = 0;
2173         /* since we do not have a precise enough measure of the audio fifo fullness,
2174            we correct audio sync only if the error is larger than this threshold */
2175         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2176
2177         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2178         packet_queue_init(&is->audioq);
2179         SDL_PauseAudio(0);
2180         break;
2181     case CODEC_TYPE_VIDEO:
2182         is->video_stream = stream_index;
2183         is->video_st = ic->streams[stream_index];
2184
2185 //        is->video_current_pts_time = av_gettime();
2186
2187         packet_queue_init(&is->videoq);
2188         is->video_tid = SDL_CreateThread(video_thread, is);
2189         break;
2190     case CODEC_TYPE_SUBTITLE:
2191         is->subtitle_stream = stream_index;
2192         is->subtitle_st = ic->streams[stream_index];
2193         packet_queue_init(&is->subtitleq);
2194
2195         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2196         break;
2197     default:
2198         break;
2199     }
2200     return 0;
2201 }
2202
2203 static void stream_component_close(VideoState *is, int stream_index)
2204 {
2205     AVFormatContext *ic = is->ic;
2206     AVCodecContext *avctx;
2207
2208     if (stream_index < 0 || stream_index >= ic->nb_streams)
2209         return;
2210     avctx = ic->streams[stream_index]->codec;
2211
2212     switch(avctx->codec_type) {
2213     case CODEC_TYPE_AUDIO:
2214         packet_queue_abort(&is->audioq);
2215
2216         SDL_CloseAudio();
2217
2218         packet_queue_end(&is->audioq);
2219         if (is->reformat_ctx)
2220             av_audio_convert_free(is->reformat_ctx);
2221         is->reformat_ctx = NULL;
2222         break;
2223     case CODEC_TYPE_VIDEO:
2224         packet_queue_abort(&is->videoq);
2225
2226         /* note: we also signal this condition to make sure we unblock the
2227            video thread in all cases */
2228         SDL_LockMutex(is->pictq_mutex);
2229         SDL_CondSignal(is->pictq_cond);
2230         SDL_UnlockMutex(is->pictq_mutex);
2231
2232         SDL_WaitThread(is->video_tid, NULL);
2233
2234         packet_queue_end(&is->videoq);
2235         break;
2236     case CODEC_TYPE_SUBTITLE:
2237         packet_queue_abort(&is->subtitleq);
2238
2239         /* note: we also signal this condition to make sure we unblock the
2240            subtitle thread in all cases */
2241         SDL_LockMutex(is->subpq_mutex);
2242         is->subtitle_stream_changed = 1;
2243
2244         SDL_CondSignal(is->subpq_cond);
2245         SDL_UnlockMutex(is->subpq_mutex);
2246
2247         SDL_WaitThread(is->subtitle_tid, NULL);
2248
2249         packet_queue_end(&is->subtitleq);
2250         break;
2251     default:
2252         break;
2253     }
2254
2255     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2256     avcodec_close(avctx);
2257     switch(avctx->codec_type) {
2258     case CODEC_TYPE_AUDIO:
2259         is->audio_st = NULL;
2260         is->audio_stream = -1;
2261         break;
2262     case CODEC_TYPE_VIDEO:
2263         is->video_st = NULL;
2264         is->video_stream = -1;
2265         break;
2266     case CODEC_TYPE_SUBTITLE:
2267         is->subtitle_st = NULL;
2268         is->subtitle_stream = -1;
2269         break;
2270     default:
2271         break;
2272     }
2273 }
2274
2275 /* since we have only one decoding thread, we can use a global
2276    variable instead of a thread local variable */
2277 static VideoState *global_video_state;
2278
2279 static int decode_interrupt_cb(void)
2280 {
2281     return (global_video_state && global_video_state->abort_request);
2282 }
2283
2284 /* this thread gets the stream from the disk or the network */
2285 static int decode_thread(void *arg)
2286 {
2287     VideoState *is = arg;
2288     AVFormatContext *ic;
2289     int err, i, ret;
2290     int st_index[CODEC_TYPE_NB];
2291     int st_count[CODEC_TYPE_NB]={0};
2292     int st_best_packet_count[CODEC_TYPE_NB];
2293     AVPacket pkt1, *pkt = &pkt1;
2294     AVFormatParameters params, *ap = &params;
2295     int eof=0;
2296
2297     ic = avformat_alloc_context();
2298
2299     memset(st_index, -1, sizeof(st_index));
2300     memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
2301     is->video_stream = -1;
2302     is->audio_stream = -1;
2303     is->subtitle_stream = -1;
2304
2305     global_video_state = is;
2306     url_set_interrupt_cb(decode_interrupt_cb);
2307
2308     memset(ap, 0, sizeof(*ap));
2309
2310     ap->prealloced_context = 1;
2311     ap->width = frame_width;
2312     ap->height= frame_height;
2313     ap->time_base= (AVRational){1, 25};
2314     ap->pix_fmt = frame_pix_fmt;
2315
2316     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2317
2318     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2319     if (err < 0) {
2320         print_error(is->filename, err);
2321         ret = -1;
2322         goto fail;
2323     }
2324     is->ic = ic;
2325
2326     if(genpts)
2327         ic->flags |= AVFMT_FLAG_GENPTS;
2328
2329     err = av_find_stream_info(ic);
2330     if (err < 0) {
2331         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2332         ret = -1;
2333         goto fail;
2334     }
2335     if(ic->pb)
2336         ic->pb->eof_reached= 0; //FIXME hack, ffplay should probably not use url_feof() to test for the end
2337
2338     if(seek_by_bytes<0)
2339         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2340
2341     /* if seeking was requested, execute it */
2342     if (start_time != AV_NOPTS_VALUE) {
2343         int64_t timestamp;
2344
2345         timestamp = start_time;
2346         /* add the stream start time */
2347         if (ic->start_time != AV_NOPTS_VALUE)
2348             timestamp += ic->start_time;
2349         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2350         if (ret < 0) {
2351             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2352                     is->filename, (double)timestamp / AV_TIME_BASE);
2353         }
2354     }
2355
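     /* choose the streams to play: honor an explicitly wanted stream index per
        type if one was given, otherwise prefer, for each type, the stream for
        which probing saw the most frames */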
2356     for(i = 0; i < ic->nb_streams; i++) {
2357         AVStream *st= ic->streams[i];
2358         AVCodecContext *avctx = st->codec;
2359         ic->streams[i]->discard = AVDISCARD_ALL;
2360         if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
2361             continue;
2362         if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
2363             continue;
2364
2365         if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
2366             continue;
2367         st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
2368
2369         switch(avctx->codec_type) {
2370         case CODEC_TYPE_AUDIO:
2371             if (!audio_disable)
2372                 st_index[CODEC_TYPE_AUDIO] = i;
2373             break;
2374         case CODEC_TYPE_VIDEO:
2375         case CODEC_TYPE_SUBTITLE:
2376             if (!video_disable)
2377                 st_index[avctx->codec_type] = i;
2378             break;
2379         default:
2380             break;
2381         }
2382     }
2383     if (show_status) {
2384         dump_format(ic, 0, is->filename, 0);
2385     }
2386
2387     /* open the streams */
2388     if (st_index[CODEC_TYPE_AUDIO] >= 0) {
2389         stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
2390     }
2391
2392     ret=-1;
2393     if (st_index[CODEC_TYPE_VIDEO] >= 0) {
2394         ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
2395     }
2396     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2397     if(ret<0) {
2398         if (!display_disable)
2399             is->show_audio = 2;
2400     }
2401
2402     if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
2403         stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
2404     }
2405
2406     if (is->video_stream < 0 && is->audio_stream < 0) {
2407         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2408         ret = -1;
2409         goto fail;
2410     }
2411
2412     for(;;) {
2413         if (is->abort_request)
2414             break;
2415         if (is->paused != is->last_paused) {
2416             is->last_paused = is->paused;
2417             if (is->paused)
2418                 is->read_pause_return= av_read_pause(ic);
2419             else
2420                 av_read_play(ic);
2421         }
2422 #if CONFIG_RTSP_DEMUXER
2423         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2424             /* wait 10 ms to avoid trying to get another packet */
2425             /* XXX: horrible */
2426             SDL_Delay(10);
2427             continue;
2428         }
2429 #endif
2430         if (is->seek_req) {
2431             int64_t seek_target= is->seek_pos;
2432             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2433             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2434 //FIXME the +-2 is needed because rounding is not done in the correct direction when
2435 //      the seek_pos/seek_rel variables are generated
2436
2437             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2438             if (ret < 0) {
2439                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2440             }else{
2441                 if (is->audio_stream >= 0) {
2442                     packet_queue_flush(&is->audioq);
2443                     packet_queue_put(&is->audioq, &flush_pkt);
2444                 }
2445                 if (is->subtitle_stream >= 0) {
2446                     packet_queue_flush(&is->subtitleq);
2447                     packet_queue_put(&is->subtitleq, &flush_pkt);
2448                 }
2449                 if (is->video_stream >= 0) {
2450                     packet_queue_flush(&is->videoq);
2451                     packet_queue_put(&is->videoq, &flush_pkt);
2452                 }
2453             }
2454             is->seek_req = 0;
2455             eof= 0;
2456         }
2457
2458         /* if the queues are full, no need to read more */
2459         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2460             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2461                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2462                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2463             /* wait 10 ms */
2464             SDL_Delay(10);
2465             continue;
2466         }
2467         if(url_feof(ic->pb) || eof) {
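                 /* at EOF, queue an empty packet so the video decoder is
                    flushed and delivers its remaining delayed frames */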
2468             if(is->video_stream >= 0){
2469                 av_init_packet(pkt);
2470                 pkt->data=NULL;
2471                 pkt->size=0;
2472                 pkt->stream_index= is->video_stream;
2473                 packet_queue_put(&is->videoq, pkt);
2474             }
2475             SDL_Delay(10);
2476             if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2477                 ret=AVERROR_EOF;
2478                 goto fail;
2479             }
2480             continue;
2481         }
2482         ret = av_read_frame(ic, pkt);
2483         if (ret < 0) {
2484             if (ret == AVERROR_EOF)
2485                 eof=1;
2486             if (url_ferror(ic->pb))
2487                 break;
2488             SDL_Delay(100); /* wait for user event */
2489             continue;
2490         }
2491         if (pkt->stream_index == is->audio_stream) {
2492             packet_queue_put(&is->audioq, pkt);
2493         } else if (pkt->stream_index == is->video_stream) {
2494             packet_queue_put(&is->videoq, pkt);
2495         } else if (pkt->stream_index == is->subtitle_stream) {
2496             packet_queue_put(&is->subtitleq, pkt);
2497         } else {
2498             av_free_packet(pkt);
2499         }
2500     }
2501     /* wait until the end */
2502     while (!is->abort_request) {
2503         SDL_Delay(100);
2504     }
2505
2506     ret = 0;
2507  fail:
2508     /* disable interrupting */
2509     global_video_state = NULL;
2510
2511     /* close each stream */
2512     if (is->audio_stream >= 0)
2513         stream_component_close(is, is->audio_stream);
2514     if (is->video_stream >= 0)
2515         stream_component_close(is, is->video_stream);
2516     if (is->subtitle_stream >= 0)
2517         stream_component_close(is, is->subtitle_stream);
2518     if (is->ic) {
2519         av_close_input_file(is->ic);
2520         is->ic = NULL; /* safety */
2521     }
2522     url_set_interrupt_cb(NULL);
2523
2524     if (ret != 0) {
2525         SDL_Event event;
2526
2527         event.type = FF_QUIT_EVENT;
2528         event.user.data1 = is;
2529         SDL_PushEvent(&event);
2530     }
2531     return 0;
2532 }
2533
2534 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2535 {
2536     VideoState *is;
2537
2538     is = av_mallocz(sizeof(VideoState));
2539     if (!is)
2540         return NULL;
2541     av_strlcpy(is->filename, filename, sizeof(is->filename));
2542     is->iformat = iformat;
2543     is->ytop = 0;
2544     is->xleft = 0;
2545
2546     /* start video display */
2547     is->pictq_mutex = SDL_CreateMutex();
2548     is->pictq_cond = SDL_CreateCond();
2549
2550     is->subpq_mutex = SDL_CreateMutex();
2551     is->subpq_cond = SDL_CreateCond();
2552
2553     is->av_sync_type = av_sync_type;
2554     is->parse_tid = SDL_CreateThread(decode_thread, is);
2555     if (!is->parse_tid) {
2556         av_free(is);
2557         return NULL;
2558     }
2559     return is;
2560 }
2561
2562 static void stream_close(VideoState *is)
2563 {
2564     VideoPicture *vp;
2565     int i;
2566     /* XXX: use a special url_shutdown call to abort parse cleanly */
2567     is->abort_request = 1;
2568     SDL_WaitThread(is->parse_tid, NULL);
2569     SDL_WaitThread(is->refresh_tid, NULL);
2570
2571     /* free all pictures */
2572     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2573         vp = &is->pictq[i];
2574 #if CONFIG_AVFILTER
2575         if (vp->picref) {
2576             avfilter_unref_pic(vp->picref);
2577             vp->picref = NULL;
2578         }
2579 #endif
2580         if (vp->bmp) {
2581             SDL_FreeYUVOverlay(vp->bmp);
2582             vp->bmp = NULL;
2583         }
2584     }
2585     SDL_DestroyMutex(is->pictq_mutex);
2586     SDL_DestroyCond(is->pictq_cond);
2587     SDL_DestroyMutex(is->subpq_mutex);
2588     SDL_DestroyCond(is->subpq_cond);
2589 #if !CONFIG_AVFILTER
2590     if (is->img_convert_ctx)
2591         sws_freeContext(is->img_convert_ctx);
2592 #endif
2593     av_free(is);
2594 }
2595
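     /* switch to the next stream of the given type; for subtitles, "no
        subtitle stream" (index -1) is part of the cycle */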
2596 static void stream_cycle_channel(VideoState *is, int codec_type)
2597 {
2598     AVFormatContext *ic = is->ic;
2599     int start_index, stream_index;
2600     AVStream *st;
2601
2602     if (codec_type == CODEC_TYPE_VIDEO)
2603         start_index = is->video_stream;
2604     else if (codec_type == CODEC_TYPE_AUDIO)
2605         start_index = is->audio_stream;
2606     else
2607         start_index = is->subtitle_stream;
2608     if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2609         return;
2610     stream_index = start_index;
2611     for(;;) {
2612         if (++stream_index >= is->ic->nb_streams)
2613         {
2614             if (codec_type == CODEC_TYPE_SUBTITLE)
2615             {
2616                 stream_index = -1;
2617                 goto the_end;
2618             } else
2619                 stream_index = 0;
2620         }
2621         if (stream_index == start_index)
2622             return;
2623         st = ic->streams[stream_index];
2624         if (st->codec->codec_type == codec_type) {
2625             /* check that parameters are OK */
2626             switch(codec_type) {
2627             case CODEC_TYPE_AUDIO:
2628                 if (st->codec->sample_rate != 0 &&
2629                     st->codec->channels != 0)
2630                     goto the_end;
2631                 break;
2632             case CODEC_TYPE_VIDEO:
2633             case CODEC_TYPE_SUBTITLE:
2634                 goto the_end;
2635             default:
2636                 break;
2637             }
2638         }
2639     }
2640  the_end:
2641     stream_component_close(is, start_index);
2642     stream_component_open(is, stream_index);
2643 }
2644
2645
2646 static void toggle_full_screen(void)
2647 {
2648     is_full_screen = !is_full_screen;
2649     if (!fs_screen_width) {
2650         /* use default SDL method */
2651 //        SDL_WM_ToggleFullScreen(screen);
2652     }
2653     video_open(cur_stream);
2654 }
2655
2656 static void toggle_pause(void)
2657 {
2658     if (cur_stream)
2659         stream_pause(cur_stream);
2660     step = 0;
2661 }
2662
2663 static void step_to_next_frame(void)
2664 {
2665     if (cur_stream) {
2666         /* if the stream is paused unpause it, then step */
2667         if (cur_stream->paused)
2668             stream_pause(cur_stream);
2669     }
2670     step = 1;
2671 }
2672
2673 static void do_exit(void)
2674 {
2675     int i;
2676     if (cur_stream) {
2677         stream_close(cur_stream);
2678         cur_stream = NULL;
2679     }
2680     for (i = 0; i < CODEC_TYPE_NB; i++)
2681         av_free(avcodec_opts[i]);
2682     av_free(avformat_opts);
2683     av_free(sws_opts);
2684 #if CONFIG_AVFILTER
2685     avfilter_uninit();
2686 #endif
2687     if (show_status)
2688         printf("\n");
2689     SDL_Quit();
2690     exit(0);
2691 }
2692
2693 static void toggle_audio_display(void)
2694 {
2695     if (cur_stream) {
2696         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2697         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2698         fill_rectangle(screen,
2699                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2700                     bgcolor);
2701         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2702     }
2703 }
2704
2705 /* handle an event sent by the GUI */
2706 static void event_loop(void)
2707 {
2708     SDL_Event event;
2709     double incr, pos, frac;
2710
2711     for(;;) {
2712         double x;
2713         SDL_WaitEvent(&event);
2714         switch(event.type) {
2715         case SDL_KEYDOWN:
2716             switch(event.key.keysym.sym) {
2717             case SDLK_ESCAPE:
2718             case SDLK_q:
2719                 do_exit();
2720                 break;
2721             case SDLK_f:
2722                 toggle_full_screen();
2723                 break;
2724             case SDLK_p:
2725             case SDLK_SPACE:
2726                 toggle_pause();
2727                 break;
2728             case SDLK_s: //S: Step to next frame
2729                 step_to_next_frame();
2730                 break;
2731             case SDLK_a:
2732                 if (cur_stream)
2733                     stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2734                 break;
2735             case SDLK_v:
2736                 if (cur_stream)
2737                     stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2738                 break;
2739             case SDLK_t:
2740                 if (cur_stream)
2741                     stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2742                 break;
2743             case SDLK_w:
2744                 toggle_audio_display();
2745                 break;
2746             case SDLK_LEFT:
2747                 incr = -10.0;
2748                 goto do_seek;
2749             case SDLK_RIGHT:
2750                 incr = 10.0;
2751                 goto do_seek;
2752             case SDLK_UP:
2753                 incr = 60.0;
2754                 goto do_seek;
2755             case SDLK_DOWN:
2756                 incr = -60.0;
2757             do_seek:
2758                 if (cur_stream) {
2759                     if (seek_by_bytes) {
2760                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2761                             pos= cur_stream->video_current_pos;
2762                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2763                             pos= cur_stream->audio_pkt.pos;
2764                         }else
2765                             pos = url_ftell(cur_stream->ic->pb);
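                                 /* convert the time increment into a byte
                                    increment using the stream bit rate; if it
                                    is unknown, assume about 180000 bytes per
                                    second (~1.4 Mbit/s) */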
2766                         if (cur_stream->ic->bit_rate)
2767                             incr *= cur_stream->ic->bit_rate / 8.0;
2768                         else
2769                             incr *= 180000.0;
2770                         pos += incr;
2771                         stream_seek(cur_stream, pos, incr, 1);
2772                     } else {
2773                         pos = get_master_clock(cur_stream);
2774                         pos += incr;
2775                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2776                     }
2777                 }
2778                 break;
2779             default:
2780                 break;
2781             }
2782             break;
2783         case SDL_MOUSEBUTTONDOWN:
2784         case SDL_MOUSEMOTION:
2785             if(event.type ==SDL_MOUSEBUTTONDOWN){
2786                 x= event.button.x;
2787             }else{
2788                 if(event.motion.state != SDL_PRESSED)
2789                     break;
2790                 x= event.motion.x;
2791             }
2792             if (cur_stream) {
2793                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2794                     uint64_t size=  url_fsize(cur_stream->ic->pb);
2795                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2796                 }else{
2797                     int64_t ts;
2798                     int ns, hh, mm, ss;
2799                     int tns, thh, tmm, tss;
2800                     tns = cur_stream->ic->duration/1000000LL;
2801                     thh = tns/3600;
2802                     tmm = (tns%3600)/60;
2803                     tss = (tns%60);
2804                     frac = x/cur_stream->width;
2805                     ns = frac*tns;
2806                     hh = ns/3600;
2807                     mm = (ns%3600)/60;
2808                     ss = (ns%60);
2809                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2810                             hh, mm, ss, thh, tmm, tss);
2811                     ts = frac*cur_stream->ic->duration;
2812                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2813                         ts += cur_stream->ic->start_time;
2814                     stream_seek(cur_stream, ts, 0, 0);
2815                 }
2816             }
2817             break;
2818         case SDL_VIDEORESIZE:
2819             if (cur_stream) {
2820                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2821                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2822                 screen_width = cur_stream->width = event.resize.w;
2823                 screen_height= cur_stream->height= event.resize.h;
2824             }
2825             break;
2826         case SDL_QUIT:
2827         case FF_QUIT_EVENT:
2828             do_exit();
2829             break;
2830         case FF_ALLOC_EVENT:
2831             video_open(event.user.data1);
2832             alloc_picture(event.user.data1);
2833             break;
2834         case FF_REFRESH_EVENT:
2835             video_refresh_timer(event.user.data1);
2836             cur_stream->refresh=0;
2837             break;
2838         default:
2839             break;
2840         }
2841     }
2842 }
2843
2844 static void opt_frame_size(const char *arg)
2845 {
2846     if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2847         fprintf(stderr, "Incorrect frame size\n");
2848         exit(1);
2849     }
2850     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2851         fprintf(stderr, "Frame size must be a multiple of 2\n");
2852         exit(1);
2853     }
2854 }
2855
2856 static int opt_width(const char *opt, const char *arg)
2857 {
2858     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2859     return 0;
2860 }
2861
2862 static int opt_height(const char *opt, const char *arg)
2863 {
2864     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2865     return 0;
2866 }
2867
2868 static void opt_format(const char *arg)
2869 {
2870     file_iformat = av_find_input_format(arg);
2871     if (!file_iformat) {
2872         fprintf(stderr, "Unknown input format: %s\n", arg);
2873         exit(1);
2874     }
2875 }
2876
2877 static void opt_frame_pix_fmt(const char *arg)
2878 {
2879     frame_pix_fmt = av_get_pix_fmt(arg);
2880 }
2881
2882 static int opt_sync(const char *opt, const char *arg)
2883 {
2884     if (!strcmp(arg, "audio"))
2885         av_sync_type = AV_SYNC_AUDIO_MASTER;
2886     else if (!strcmp(arg, "video"))
2887         av_sync_type = AV_SYNC_VIDEO_MASTER;
2888     else if (!strcmp(arg, "ext"))
2889         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2890     else {
2891         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2892         exit(1);
2893     }
2894     return 0;
2895 }
2896
2897 static int opt_seek(const char *opt, const char *arg)
2898 {
2899     start_time = parse_time_or_die(opt, arg, 1);
2900     return 0;
2901 }
2902
2903 static int opt_debug(const char *opt, const char *arg)
2904 {
2905     av_log_set_level(99);
2906     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2907     return 0;
2908 }
2909
2910 static int opt_vismv(const char *opt, const char *arg)
2911 {
2912     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2913     return 0;
2914 }
2915
2916 static int opt_thread_count(const char *opt, const char *arg)
2917 {
2918     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2919 #if !HAVE_THREADS
2920     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2921 #endif
2922     return 0;
2923 }
2924
2925 static const OptionDef options[] = {
2926 #include "cmdutils_common_opts.h"
2927     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2928     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2929     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2930     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2931     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2932     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2933     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2934     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2935     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2936     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2937     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2938     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2939     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2940     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2941     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2942     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2943     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2944     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2945     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2946     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2947     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2948     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2949     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2950     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2951     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2952     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2953     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2954     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2955     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2956     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2957     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2958     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2959 #if CONFIG_AVFILTER
2960     { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2961 #endif
2962     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2963     { NULL, },
2964 };
2965
2966 static void show_usage(void)
2967 {
2968     printf("Simple media player\n");
2969     printf("usage: ffplay [options] input_file\n");
2970     printf("\n");
2971 }
2972
2973 static void show_help(void)
2974 {
2975     show_usage();
2976     show_help_options(options, "Main options:\n",
2977                       OPT_EXPERT, 0);
2978     show_help_options(options, "\nAdvanced options:\n",
2979                       OPT_EXPERT, OPT_EXPERT);
2980     printf("\nWhile playing:\n"
2981            "q, ESC              quit\n"
2982            "f                   toggle full screen\n"
2983            "p, SPC              pause\n"
2984            "a                   cycle audio channel\n"
2985            "v                   cycle video channel\n"
2986            "t                   cycle subtitle channel\n"
2987            "w                   cycle audio display modes\n"
2988            "left/right          seek backward/forward 10 seconds\n"
2989            "down/up             seek backward/forward 1 minute\n"
2990            "mouse click         seek to percentage in file corresponding to fraction of width\n"
2991            );
2992 }
2993
2994 static void opt_input_file(const char *filename)
2995 {
2996     if (input_filename) {
2997         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
2998                 filename, input_filename);
2999         exit(1);
3000     }
3001     if (!strcmp(filename, "-"))
3002         filename = "pipe:";
3003     input_filename = filename;
3004 }
3005
3006 /* program entry point */
3007 int main(int argc, char **argv)
3008 {
3009     int flags, i;
3010
3011     /* register all codecs, demuxers and protocols */
3012     avcodec_register_all();
3013     avdevice_register_all();
3014 #if CONFIG_AVFILTER
3015     avfilter_register_all();
3016 #endif
3017     av_register_all();
3018
3019     for(i=0; i<CODEC_TYPE_NB; i++){
3020         avcodec_opts[i]= avcodec_alloc_context2(i);
3021     }
3022     avformat_opts = avformat_alloc_context();
3023 #if !CONFIG_AVFILTER
3024     sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3025 #endif
3026
3027     show_banner();
3028
3029     parse_options(argc, argv, options, opt_input_file);
3030
3031     if (!input_filename) {
3032         show_usage();
3033         fprintf(stderr, "An input file must be specified\n");
3034         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3035         exit(1);
3036     }
3037
3038     if (display_disable) {
3039         video_disable = 1;
3040     }
3041     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3042 #if !defined(__MINGW32__) && !defined(__APPLE__)
3043     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3044 #endif
3045     if (SDL_Init (flags)) {
3046         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3047         exit(1);
3048     }
3049
3050     if (!display_disable) {
3051 #if HAVE_SDL_VIDEO_SIZE
3052         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3053         fs_screen_width = vi->current_w;
3054         fs_screen_height = vi->current_h;
3055 #endif
3056     }
3057
3058     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3059     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3060     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3061
3062     av_init_packet(&flush_pkt);
3063     flush_pkt.data= "FLUSH";
3064
3065     cur_stream = stream_open(input_filename, file_iformat);
3066
3067     event_loop();
3068
3069     /* never returns */
3070
3071     return 0;
3072 }