Move variable declaration, fixes the warning:
1 /*
2  * FFplay : Simple Media Player based on the FFmpeg libraries
3  * Copyright (c) 2003 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21
22 #include "config.h"
23 #include <inttypes.h>
24 #include <math.h>
25 #include <limits.h>
26 #include "libavutil/avstring.h"
27 #include "libavutil/pixdesc.h"
28 #include "libavformat/avformat.h"
29 #include "libavdevice/avdevice.h"
30 #include "libswscale/swscale.h"
31 #include "libavcodec/audioconvert.h"
32 #include "libavcodec/colorspace.h"
33 #include "libavcodec/opt.h"
34 #include "libavcodec/avfft.h"
35
36 #if CONFIG_AVFILTER
37 # include "libavfilter/avfilter.h"
38 # include "libavfilter/avfiltergraph.h"
39 # include "libavfilter/graphparser.h"
40 #endif
41
42 #include "cmdutils.h"
43
44 #include <SDL.h>
45 #include <SDL_thread.h>
46
47 #ifdef __MINGW32__
48 #undef main /* We don't want SDL to override our main() */
49 #endif
50
51 #include <unistd.h>
52 #include <assert.h>
53
54 const char program_name[] = "FFplay";
55 const int program_birth_year = 2003;
56
57 //#define DEBUG_SYNC
58
59 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
60 #define MIN_AUDIOQ_SIZE (20 * 16 * 1024)
61 #define MIN_FRAMES 5
62
63 /* SDL audio buffer size, in samples. Should be small to have precise
64    A/V sync as SDL does not have hardware buffer fullness info. */
65 #define SDL_AUDIO_BUFFER_SIZE 1024
66
67 /* no AV sync correction is done if below the AV sync threshold */
68 #define AV_SYNC_THRESHOLD 0.01
69 /* no AV correction is done if too big error */
70 #define AV_NOSYNC_THRESHOLD 10.0
71
72 #define FRAME_SKIP_FACTOR 0.05
73
74 /* maximum audio speed change to get correct sync */
75 #define SAMPLE_CORRECTION_PERCENT_MAX 10
76
77 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
78 #define AUDIO_DIFF_AVG_NB   20
79
80 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
81 #define SAMPLE_ARRAY_SIZE (2*65536)
82
83 #if !CONFIG_AVFILTER
84 static int sws_flags = SWS_BICUBIC;
85 #endif
86
87 typedef struct PacketQueue {
88     AVPacketList *first_pkt, *last_pkt;
89     int nb_packets;
90     int size;
91     int abort_request;
92     SDL_mutex *mutex;
93     SDL_cond *cond;
94 } PacketQueue;
95
96 #define VIDEO_PICTURE_QUEUE_SIZE 2
97 #define SUBPICTURE_QUEUE_SIZE 4
98
99 typedef struct VideoPicture {
100     double pts;                                  ///<presentation time stamp for this picture
101     double target_clock;                         ///<av_gettime() time at which this should be displayed ideally
102     int64_t pos;                                 ///<byte position in file
103     SDL_Overlay *bmp;
104     int width, height; /* source height & width */
105     int allocated;
106     enum PixelFormat pix_fmt;
107
108 #if CONFIG_AVFILTER
109     AVFilterPicRef *picref;
110 #endif
111 } VideoPicture;
112
113 typedef struct SubPicture {
114     double pts; /* presentation time stamp for this picture */
115     AVSubtitle sub;
116 } SubPicture;
117
118 enum {
119     AV_SYNC_AUDIO_MASTER, /* default choice */
120     AV_SYNC_VIDEO_MASTER,
121     AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
122 };
123
124 typedef struct VideoState {
125     SDL_Thread *parse_tid;
126     SDL_Thread *video_tid;
127     SDL_Thread *refresh_tid;
128     AVInputFormat *iformat;
129     int no_background;
130     int abort_request;
131     int paused;
132     int last_paused;
133     int seek_req;
134     int seek_flags;
135     int64_t seek_pos;
136     int64_t seek_rel;
137     int read_pause_return;
138     AVFormatContext *ic;
139     int dtg_active_format;
140
141     int audio_stream;
142
143     int av_sync_type;
144     double external_clock; /* external clock base */
145     int64_t external_clock_time;
146
147     double audio_clock;
148     double audio_diff_cum; /* used for AV difference average computation */
149     double audio_diff_avg_coef;
150     double audio_diff_threshold;
151     int audio_diff_avg_count;
152     AVStream *audio_st;
153     PacketQueue audioq;
154     int audio_hw_buf_size;
155     /* samples output by the codec. we reserve more space for avsync
156        compensation */
157     DECLARE_ALIGNED(16,uint8_t,audio_buf1)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
158     DECLARE_ALIGNED(16,uint8_t,audio_buf2)[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
159     uint8_t *audio_buf;
160     unsigned int audio_buf_size; /* in bytes */
161     int audio_buf_index; /* in bytes */
162     AVPacket audio_pkt_temp;
163     AVPacket audio_pkt;
164     enum SampleFormat audio_src_fmt;
165     AVAudioConvert *reformat_ctx;
166
167     int show_audio; /* if true, display audio samples */
168     int16_t sample_array[SAMPLE_ARRAY_SIZE];
169     int sample_array_index;
170     int last_i_start;
171     RDFTContext *rdft;
172     int rdft_bits;
173     int xpos;
174
175     SDL_Thread *subtitle_tid;
176     int subtitle_stream;
177     int subtitle_stream_changed;
178     AVStream *subtitle_st;
179     PacketQueue subtitleq;
180     SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
181     int subpq_size, subpq_rindex, subpq_windex;
182     SDL_mutex *subpq_mutex;
183     SDL_cond *subpq_cond;
184
185     double frame_timer;
186     double frame_last_pts;
187     double frame_last_delay;
188     double video_clock;                          ///<pts of last decoded frame / predicted pts of next decoded frame
189     int video_stream;
190     AVStream *video_st;
191     PacketQueue videoq;
192     double video_current_pts;                    ///<current displayed pts (different from video_clock if frame fifos are used)
193     double video_current_pts_drift;              ///<video_current_pts - time (av_gettime) at which we updated video_current_pts - used to have running video pts
194     int64_t video_current_pos;                   ///<current displayed file pos
195     VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
196     int pictq_size, pictq_rindex, pictq_windex;
197     SDL_mutex *pictq_mutex;
198     SDL_cond *pictq_cond;
199 #if !CONFIG_AVFILTER
200     struct SwsContext *img_convert_ctx;
201 #endif
202
203     //    QETimer *video_timer;
204     char filename[1024];
205     int width, height, xleft, ytop;
206
207     int64_t faulty_pts;
208     int64_t faulty_dts;
209     int64_t last_dts_for_fault_detection;
210     int64_t last_pts_for_fault_detection;
211
212 #if CONFIG_AVFILTER
213     AVFilterContext *out_video_filter;          ///<the last filter in the video chain
214 #endif
215
216     float skip_frames;
217     float skip_frames_index;
218     int refresh;
219 } VideoState;
220
221 static void show_help(void);
222 static int audio_write_get_buf_size(VideoState *is);
223
224 /* options specified by the user */
225 static AVInputFormat *file_iformat;
226 static const char *input_filename;
227 static const char *window_title;
228 static int fs_screen_width;
229 static int fs_screen_height;
230 static int screen_width = 0;
231 static int screen_height = 0;
232 static int frame_width = 0;
233 static int frame_height = 0;
234 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
235 static int audio_disable;
236 static int video_disable;
237 static int wanted_stream[CODEC_TYPE_NB]={
238     [CODEC_TYPE_AUDIO]=-1,
239     [CODEC_TYPE_VIDEO]=-1,
240     [CODEC_TYPE_SUBTITLE]=-1,
241 };
242 static int seek_by_bytes=-1;
243 static int display_disable;
244 static int show_status = 1;
245 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
246 static int64_t start_time = AV_NOPTS_VALUE;
247 static int debug = 0;
248 static int debug_mv = 0;
249 static int step = 0;
250 static int thread_count = 1;
251 static int workaround_bugs = 1;
252 static int fast = 0;
253 static int genpts = 0;
254 static int lowres = 0;
255 static int idct = FF_IDCT_AUTO;
256 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
257 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
258 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
259 static int error_recognition = FF_ER_CAREFUL;
260 static int error_concealment = 3;
261 static int decoder_reorder_pts= -1;
262 static int autoexit;
263 static int framedrop=1;
264
265 static int rdftspeed=20;
266 #if CONFIG_AVFILTER
267 static char *vfilters = NULL;
268 #endif
269
270 /* current context */
271 static int is_full_screen;
272 static VideoState *cur_stream;
273 static int64_t audio_callback_time;
274
275 static AVPacket flush_pkt;
276
277 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
278 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
279 #define FF_QUIT_EVENT    (SDL_USEREVENT + 2)
280
281 static SDL_Surface *screen;
282
283 static int packet_queue_put(PacketQueue *q, AVPacket *pkt);
284
285 /* packet queue handling */
286 static void packet_queue_init(PacketQueue *q)
287 {
288     memset(q, 0, sizeof(PacketQueue));
289     q->mutex = SDL_CreateMutex();
290     q->cond = SDL_CreateCond();
291     packet_queue_put(q, &flush_pkt);
292 }
293
294 static void packet_queue_flush(PacketQueue *q)
295 {
296     AVPacketList *pkt, *pkt1;
297
298     SDL_LockMutex(q->mutex);
299     for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
300         pkt1 = pkt->next;
301         av_free_packet(&pkt->pkt);
302         av_freep(&pkt);
303     }
304     q->last_pkt = NULL;
305     q->first_pkt = NULL;
306     q->nb_packets = 0;
307     q->size = 0;
308     SDL_UnlockMutex(q->mutex);
309 }
310
311 static void packet_queue_end(PacketQueue *q)
312 {
313     packet_queue_flush(q);
314     SDL_DestroyMutex(q->mutex);
315     SDL_DestroyCond(q->cond);
316 }
317
318 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
319 {
320     AVPacketList *pkt1;
321
322     /* duplicate the packet */
323     if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
324         return -1;
325
326     pkt1 = av_malloc(sizeof(AVPacketList));
327     if (!pkt1)
328         return -1;
329     pkt1->pkt = *pkt;
330     pkt1->next = NULL;
331
332
333     SDL_LockMutex(q->mutex);
334
335     if (!q->last_pkt)
336
337         q->first_pkt = pkt1;
338     else
339         q->last_pkt->next = pkt1;
340     q->last_pkt = pkt1;
341     q->nb_packets++;
342     q->size += pkt1->pkt.size + sizeof(*pkt1);
343     /* XXX: should duplicate packet data in DV case */
344     SDL_CondSignal(q->cond);
345
346     SDL_UnlockMutex(q->mutex);
347     return 0;
348 }
349
350 static void packet_queue_abort(PacketQueue *q)
351 {
352     SDL_LockMutex(q->mutex);
353
354     q->abort_request = 1;
355
356     SDL_CondSignal(q->cond);
357
358     SDL_UnlockMutex(q->mutex);
359 }
360
361 /* return < 0 if aborted, 0 if no packet and > 0 if packet.  */
362 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
363 {
364     AVPacketList *pkt1;
365     int ret;
366
367     SDL_LockMutex(q->mutex);
368
369     for(;;) {
370         if (q->abort_request) {
371             ret = -1;
372             break;
373         }
374
375         pkt1 = q->first_pkt;
376         if (pkt1) {
377             q->first_pkt = pkt1->next;
378             if (!q->first_pkt)
379                 q->last_pkt = NULL;
380             q->nb_packets--;
381             q->size -= pkt1->pkt.size + sizeof(*pkt1);
382             *pkt = pkt1->pkt;
383             av_free(pkt1);
384             ret = 1;
385             break;
386         } else if (!block) {
387             ret = 0;
388             break;
389         } else {
390             SDL_CondWait(q->cond, q->mutex);
391         }
392     }
393     SDL_UnlockMutex(q->mutex);
394     return ret;
395 }
396
397 static inline void fill_rectangle(SDL_Surface *screen,
398                                   int x, int y, int w, int h, int color)
399 {
400     SDL_Rect rect;
401     rect.x = x;
402     rect.y = y;
403     rect.w = w;
404     rect.h = h;
405     SDL_FillRect(screen, &rect, color);
406 }
407
408 #if 0
409 /* draw only the border of a rectangle */
410 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
411 {
412     int w1, w2, h1, h2;
413
414     /* fill the background */
415     w1 = x;
416     if (w1 < 0)
417         w1 = 0;
418     w2 = s->width - (x + w);
419     if (w2 < 0)
420         w2 = 0;
421     h1 = y;
422     if (h1 < 0)
423         h1 = 0;
424     h2 = s->height - (y + h);
425     if (h2 < 0)
426         h2 = 0;
427     fill_rectangle(screen,
428                    s->xleft, s->ytop,
429                    w1, s->height,
430                    color);
431     fill_rectangle(screen,
432                    s->xleft + s->width - w2, s->ytop,
433                    w2, s->height,
434                    color);
435     fill_rectangle(screen,
436                    s->xleft + w1, s->ytop,
437                    s->width - w1 - w2, h1,
438                    color);
439     fill_rectangle(screen,
440                    s->xleft + w1, s->ytop + s->height - h2,
441                    s->width - w1 - w2, h2,
442                    color);
443 }
444 #endif
445
446 #define ALPHA_BLEND(a, oldp, newp, s)\
447 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
448
449 #define RGBA_IN(r, g, b, a, s)\
450 {\
451     unsigned int v = ((const uint32_t *)(s))[0];\
452     a = (v >> 24) & 0xff;\
453     r = (v >> 16) & 0xff;\
454     g = (v >> 8) & 0xff;\
455     b = v & 0xff;\
456 }
457
458 #define YUVA_IN(y, u, v, a, s, pal)\
459 {\
460     unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
461     a = (val >> 24) & 0xff;\
462     y = (val >> 16) & 0xff;\
463     u = (val >> 8) & 0xff;\
464     v = val & 0xff;\
465 }
466
467 #define YUVA_OUT(d, y, u, v, a)\
468 {\
469     ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
470 }
471
472
473 #define BPP 1
474
475 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
476 {
477     int wrap, wrap3, width2, skip2;
478     int y, u, v, a, u1, v1, a1, w, h;
479     uint8_t *lum, *cb, *cr;
480     const uint8_t *p;
481     const uint32_t *pal;
482     int dstx, dsty, dstw, dsth;
483
484     dstw = av_clip(rect->w, 0, imgw);
485     dsth = av_clip(rect->h, 0, imgh);
486     dstx = av_clip(rect->x, 0, imgw - dstw);
487     dsty = av_clip(rect->y, 0, imgh - dsth);
488     lum = dst->data[0] + dsty * dst->linesize[0];
489     cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
490     cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
491
492     width2 = ((dstw + 1) >> 1) + (dstx & ~dstw & 1);
493     skip2 = dstx >> 1;
494     wrap = dst->linesize[0];
495     wrap3 = rect->pict.linesize[0];
496     p = rect->pict.data[0];
497     pal = (const uint32_t *)rect->pict.data[1];  /* Now in YCrCb! */
498
499     if (dsty & 1) {
500         lum += dstx;
501         cb += skip2;
502         cr += skip2;
503
504         if (dstx & 1) {
505             YUVA_IN(y, u, v, a, p, pal);
506             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
507             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
508             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
509             cb++;
510             cr++;
511             lum++;
512             p += BPP;
513         }
514         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
515             YUVA_IN(y, u, v, a, p, pal);
516             u1 = u;
517             v1 = v;
518             a1 = a;
519             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
520
521             YUVA_IN(y, u, v, a, p + BPP, pal);
522             u1 += u;
523             v1 += v;
524             a1 += a;
525             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
526             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
527             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
528             cb++;
529             cr++;
530             p += 2 * BPP;
531             lum += 2;
532         }
533         if (w) {
534             YUVA_IN(y, u, v, a, p, pal);
535             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
536             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
537             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
538             p++;
539             lum++;
540         }
541         p += wrap3 - dstw * BPP;
542         lum += wrap - dstw - dstx;
543         cb += dst->linesize[1] - width2 - skip2;
544         cr += dst->linesize[2] - width2 - skip2;
545     }
546     for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
547         lum += dstx;
548         cb += skip2;
549         cr += skip2;
550
551         if (dstx & 1) {
552             YUVA_IN(y, u, v, a, p, pal);
553             u1 = u;
554             v1 = v;
555             a1 = a;
556             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
557             p += wrap3;
558             lum += wrap;
559             YUVA_IN(y, u, v, a, p, pal);
560             u1 += u;
561             v1 += v;
562             a1 += a;
563             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
564             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
565             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
566             cb++;
567             cr++;
568             p += -wrap3 + BPP;
569             lum += -wrap + 1;
570         }
571         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
572             YUVA_IN(y, u, v, a, p, pal);
573             u1 = u;
574             v1 = v;
575             a1 = a;
576             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
577
578             YUVA_IN(y, u, v, a, p + BPP, pal);
579             u1 += u;
580             v1 += v;
581             a1 += a;
582             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
583             p += wrap3;
584             lum += wrap;
585
586             YUVA_IN(y, u, v, a, p, pal);
587             u1 += u;
588             v1 += v;
589             a1 += a;
590             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
591
592             YUVA_IN(y, u, v, a, p + BPP, pal);
593             u1 += u;
594             v1 += v;
595             a1 += a;
596             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
597
598             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
599             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
600
601             cb++;
602             cr++;
603             p += -wrap3 + 2 * BPP;
604             lum += -wrap + 2;
605         }
606         if (w) {
607             YUVA_IN(y, u, v, a, p, pal);
608             u1 = u;
609             v1 = v;
610             a1 = a;
611             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
612             p += wrap3;
613             lum += wrap;
614             YUVA_IN(y, u, v, a, p, pal);
615             u1 += u;
616             v1 += v;
617             a1 += a;
618             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
619             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
620             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
621             cb++;
622             cr++;
623             p += -wrap3 + BPP;
624             lum += -wrap + 1;
625         }
626         p += wrap3 + (wrap3 - dstw * BPP);
627         lum += wrap + (wrap - dstw - dstx);
628         cb += dst->linesize[1] - width2 - skip2;
629         cr += dst->linesize[2] - width2 - skip2;
630     }
631     /* handle odd height */
632     if (h) {
633         lum += dstx;
634         cb += skip2;
635         cr += skip2;
636
637         if (dstx & 1) {
638             YUVA_IN(y, u, v, a, p, pal);
639             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
640             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
641             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
642             cb++;
643             cr++;
644             lum++;
645             p += BPP;
646         }
647         for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
648             YUVA_IN(y, u, v, a, p, pal);
649             u1 = u;
650             v1 = v;
651             a1 = a;
652             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
653
654             YUVA_IN(y, u, v, a, p + BPP, pal);
655             u1 += u;
656             v1 += v;
657             a1 += a;
658             lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
659             cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u, 1);
660             cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v, 1);
661             cb++;
662             cr++;
663             p += 2 * BPP;
664             lum += 2;
665         }
666         if (w) {
667             YUVA_IN(y, u, v, a, p, pal);
668             lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
669             cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
670             cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
671         }
672     }
673 }
674
675 static void free_subpicture(SubPicture *sp)
676 {
677     int i;
678
679     for (i = 0; i < sp->sub.num_rects; i++)
680     {
681         av_freep(&sp->sub.rects[i]->pict.data[0]);
682         av_freep(&sp->sub.rects[i]->pict.data[1]);
683         av_freep(&sp->sub.rects[i]);
684     }
685
686     av_free(sp->sub.rects);
687
688     memset(&sp->sub, 0, sizeof(AVSubtitle));
689 }
690
691 static void video_image_display(VideoState *is)
692 {
693     VideoPicture *vp;
694     SubPicture *sp;
695     AVPicture pict;
696     float aspect_ratio;
697     int width, height, x, y;
698     SDL_Rect rect;
699     int i;
700
701     vp = &is->pictq[is->pictq_rindex];
702     if (vp->bmp) {
703 #if CONFIG_AVFILTER
704          if (vp->picref->pixel_aspect.num == 0)
705              aspect_ratio = 0;
706          else
707              aspect_ratio = av_q2d(vp->picref->pixel_aspect);
708 #else
709
710         /* XXX: use variable in the frame */
711         if (is->video_st->sample_aspect_ratio.num)
712             aspect_ratio = av_q2d(is->video_st->sample_aspect_ratio);
713         else if (is->video_st->codec->sample_aspect_ratio.num)
714             aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio);
715         else
716             aspect_ratio = 0;
717 #endif
718         if (aspect_ratio <= 0.0)
719             aspect_ratio = 1.0;
720         aspect_ratio *= (float)vp->width / (float)vp->height;
721         /* if an active format is indicated, then it overrides the
722            mpeg format */
723 #if 0
724         if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
725             is->dtg_active_format = is->video_st->codec->dtg_active_format;
726             printf("dtg_active_format=%d\n", is->dtg_active_format);
727         }
728 #endif
729 #if 0
730         switch(is->video_st->codec->dtg_active_format) {
731         case FF_DTG_AFD_SAME:
732         default:
733             /* nothing to do */
734             break;
735         case FF_DTG_AFD_4_3:
736             aspect_ratio = 4.0 / 3.0;
737             break;
738         case FF_DTG_AFD_16_9:
739             aspect_ratio = 16.0 / 9.0;
740             break;
741         case FF_DTG_AFD_14_9:
742             aspect_ratio = 14.0 / 9.0;
743             break;
744         case FF_DTG_AFD_4_3_SP_14_9:
745             aspect_ratio = 14.0 / 9.0;
746             break;
747         case FF_DTG_AFD_16_9_SP_14_9:
748             aspect_ratio = 14.0 / 9.0;
749             break;
750         case FF_DTG_AFD_SP_4_3:
751             aspect_ratio = 4.0 / 3.0;
752             break;
753         }
754 #endif
755
756         if (is->subtitle_st)
757         {
758             if (is->subpq_size > 0)
759             {
760                 sp = &is->subpq[is->subpq_rindex];
761
762                 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
763                 {
764                     SDL_LockYUVOverlay (vp->bmp);
765
766                     pict.data[0] = vp->bmp->pixels[0];
767                     pict.data[1] = vp->bmp->pixels[2];
768                     pict.data[2] = vp->bmp->pixels[1];
769
770                     pict.linesize[0] = vp->bmp->pitches[0];
771                     pict.linesize[1] = vp->bmp->pitches[2];
772                     pict.linesize[2] = vp->bmp->pitches[1];
773
774                     for (i = 0; i < sp->sub.num_rects; i++)
775                         blend_subrect(&pict, sp->sub.rects[i],
776                                       vp->bmp->w, vp->bmp->h);
777
778                     SDL_UnlockYUVOverlay (vp->bmp);
779                 }
780             }
781         }
782
783
784         /* XXX: we assume the screen has a 1.0 pixel ratio */
785         height = is->height;
786         width = ((int)rint(height * aspect_ratio)) & ~1;
787         if (width > is->width) {
788             width = is->width;
789             height = ((int)rint(width / aspect_ratio)) & ~1;
790         }
791         x = (is->width - width) / 2;
792         y = (is->height - height) / 2;
793         if (!is->no_background) {
794             /* fill the background */
795             //            fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
796         } else {
797             is->no_background = 0;
798         }
799         rect.x = is->xleft + x;
800         rect.y = is->ytop  + y;
801         rect.w = width;
802         rect.h = height;
803         SDL_DisplayYUVOverlay(vp->bmp, &rect);
804     } else {
805 #if 0
806         fill_rectangle(screen,
807                        is->xleft, is->ytop, is->width, is->height,
808                        QERGB(0x00, 0x00, 0x00));
809 #endif
810     }
811 }
812
813 static inline int compute_mod(int a, int b)
814 {
815     a = a % b;
816     if (a >= 0)
817         return a;
818     else
819         return a + b;
820 }
821
822 static void video_audio_display(VideoState *s)
823 {
824     int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
825     int ch, channels, h, h2, bgcolor, fgcolor;
826     int16_t time_diff;
827     int rdft_bits, nb_freq;
828
829     for(rdft_bits=1; (1<<rdft_bits)<2*s->height; rdft_bits++)
830         ;
831     nb_freq= 1<<(rdft_bits-1);
832
833     /* compute display index : center on currently output samples */
834     channels = s->audio_st->codec->channels;
835     nb_display_channels = channels;
836     if (!s->paused) {
837         int data_used= s->show_audio==1 ? s->width : (2*nb_freq);
838         n = 2 * channels;
839         delay = audio_write_get_buf_size(s);
840         delay /= n;
841
842         /* to be more precise, we take into account the time spent since
843            the last buffer computation */
844         if (audio_callback_time) {
845             time_diff = av_gettime() - audio_callback_time;
846             delay -= (time_diff * s->audio_st->codec->sample_rate) / 1000000;
847         }
848
849         delay += 2*data_used;
850         if (delay < data_used)
851             delay = data_used;
852
853         i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
854         if(s->show_audio==1){
855             h= INT_MIN;
856             for(i=0; i<1000; i+=channels){
857                 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
858                 int a= s->sample_array[idx];
859                 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
860                 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
861                 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
862                 int score= a-d;
863                 if(h<score && (b^c)<0){
864                     h= score;
865                     i_start= idx;
866                 }
867             }
868         }
869
870         s->last_i_start = i_start;
871     } else {
872         i_start = s->last_i_start;
873     }
874
875     bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
876     if(s->show_audio==1){
877         fill_rectangle(screen,
878                        s->xleft, s->ytop, s->width, s->height,
879                        bgcolor);
880
881         fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
882
883         /* total height for one channel */
884         h = s->height / nb_display_channels;
885         /* graph height / 2 */
886         h2 = (h * 9) / 20;
887         for(ch = 0;ch < nb_display_channels; ch++) {
888             i = i_start + ch;
889             y1 = s->ytop + ch * h + (h / 2); /* position of center line */
890             for(x = 0; x < s->width; x++) {
891                 y = (s->sample_array[i] * h2) >> 15;
892                 if (y < 0) {
893                     y = -y;
894                     ys = y1 - y;
895                 } else {
896                     ys = y1;
897                 }
898                 fill_rectangle(screen,
899                                s->xleft + x, ys, 1, y,
900                                fgcolor);
901                 i += channels;
902                 if (i >= SAMPLE_ARRAY_SIZE)
903                     i -= SAMPLE_ARRAY_SIZE;
904             }
905         }
906
907         fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
908
909         for(ch = 1;ch < nb_display_channels; ch++) {
910             y = s->ytop + ch * h;
911             fill_rectangle(screen,
912                            s->xleft, y, s->width, 1,
913                            fgcolor);
914         }
915         SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
916     }else{
917         nb_display_channels= FFMIN(nb_display_channels, 2);
918         if(rdft_bits != s->rdft_bits){
919             av_rdft_end(s->rdft);
920             s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
921             s->rdft_bits= rdft_bits;
922         }
923         {
924             FFTSample data[2][2*nb_freq];
925             for(ch = 0;ch < nb_display_channels; ch++) {
926                 i = i_start + ch;
927                 for(x = 0; x < 2*nb_freq; x++) {
928                     double w= (x-nb_freq)*(1.0/nb_freq);
929                     data[ch][x]= s->sample_array[i]*(1.0-w*w);
930                     i += channels;
931                     if (i >= SAMPLE_ARRAY_SIZE)
932                         i -= SAMPLE_ARRAY_SIZE;
933                 }
934                 av_rdft_calc(s->rdft, data[ch]);
935             }
936             // least efficient way to do this; we should of course access it directly, but it's more than fast enough
937             for(y=0; y<s->height; y++){
938                 double w= 1/sqrt(nb_freq);
939                 int a= sqrt(w*sqrt(data[0][2*y+0]*data[0][2*y+0] + data[0][2*y+1]*data[0][2*y+1]));
940                 int b= sqrt(w*sqrt(data[1][2*y+0]*data[1][2*y+0] + data[1][2*y+1]*data[1][2*y+1]));
941                 a= FFMIN(a,255);
942                 b= FFMIN(b,255);
943                 fgcolor = SDL_MapRGB(screen->format, a, b, (a+b)/2);
944
945                 fill_rectangle(screen,
946                             s->xpos, s->height-y, 1, 1,
947                             fgcolor);
948             }
949         }
950         SDL_UpdateRect(screen, s->xpos, s->ytop, 1, s->height);
951         s->xpos++;
952         if(s->xpos >= s->width)
953             s->xpos= s->xleft;
954     }
955 }
956
957 static int video_open(VideoState *is){
958     int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
959     int w,h;
960
961     if(is_full_screen) flags |= SDL_FULLSCREEN;
962     else               flags |= SDL_RESIZABLE;
963
964     if (is_full_screen && fs_screen_width) {
965         w = fs_screen_width;
966         h = fs_screen_height;
967     } else if(!is_full_screen && screen_width){
968         w = screen_width;
969         h = screen_height;
970 #if CONFIG_AVFILTER
971     }else if (is->out_video_filter && is->out_video_filter->inputs[0]){
972         w = is->out_video_filter->inputs[0]->w;
973         h = is->out_video_filter->inputs[0]->h;
974 #else
975     }else if (is->video_st && is->video_st->codec->width){
976         w = is->video_st->codec->width;
977         h = is->video_st->codec->height;
978 #endif
979     } else {
980         w = 640;
981         h = 480;
982     }
983     if(screen && is->width == screen->w && screen->w == w
984        && is->height== screen->h && screen->h == h)
985         return 0;
986
987 #ifndef __APPLE__
988     screen = SDL_SetVideoMode(w, h, 0, flags);
989 #else
990     /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
991     screen = SDL_SetVideoMode(w, h, 24, flags);
992 #endif
993     if (!screen) {
994         fprintf(stderr, "SDL: could not set video mode - exiting\n");
995         return -1;
996     }
997     if (!window_title)
998         window_title = input_filename;
999     SDL_WM_SetCaption(window_title, window_title);
1000
1001     is->width = screen->w;
1002     is->height = screen->h;
1003
1004     return 0;
1005 }
1006
1007 /* display the current picture, if any */
1008 static void video_display(VideoState *is)
1009 {
1010     if(!screen)
1011         video_open(cur_stream);
1012     if (is->audio_st && is->show_audio)
1013         video_audio_display(is);
1014     else if (is->video_st)
1015         video_image_display(is);
1016 }
1017
1018 static int refresh_thread(void *opaque)
1019 {
1020     VideoState *is= opaque;
1021     while(!is->abort_request){
1022     SDL_Event event;
1023     event.type = FF_REFRESH_EVENT;
1024     event.user.data1 = opaque;
1025         if(!is->refresh){
1026             is->refresh=1;
1027     SDL_PushEvent(&event);
1028         }
1029         usleep(is->audio_st && is->show_audio ? rdftspeed*1000 : 5000); //FIXME ideally we should wait the correct time but SDL's event passing is so slow it would be silly
1030     }
1031     return 0;
1032 }
1033
1034 /* get the current audio clock value */
1035 static double get_audio_clock(VideoState *is)
1036 {
1037     double pts;
1038     int hw_buf_size, bytes_per_sec;
1039     pts = is->audio_clock;
1040     hw_buf_size = audio_write_get_buf_size(is);
1041     bytes_per_sec = 0;
1042     if (is->audio_st) {
1043         bytes_per_sec = is->audio_st->codec->sample_rate *
1044             2 * is->audio_st->codec->channels;
1045     }
1046     if (bytes_per_sec)
1047         pts -= (double)hw_buf_size / bytes_per_sec;
1048     return pts;
1049 }
1050
1051 /* get the current video clock value */
1052 static double get_video_clock(VideoState *is)
1053 {
1054     if (is->paused) {
1055         return is->video_current_pts;
1056     } else {
1057         return is->video_current_pts_drift + av_gettime() / 1000000.0;
1058     }
1059 }
1060
1061 /* get the current external clock value */
1062 static double get_external_clock(VideoState *is)
1063 {
1064     int64_t ti;
1065     ti = av_gettime();
1066     return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
1067 }
1068
1069 /* get the current master clock value */
1070 static double get_master_clock(VideoState *is)
1071 {
1072     double val;
1073
1074     if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1075         if (is->video_st)
1076             val = get_video_clock(is);
1077         else
1078             val = get_audio_clock(is);
1079     } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1080         if (is->audio_st)
1081             val = get_audio_clock(is);
1082         else
1083             val = get_video_clock(is);
1084     } else {
1085         val = get_external_clock(is);
1086     }
1087     return val;
1088 }
1089
1090 /* seek in the stream */
1091 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1092 {
1093     if (!is->seek_req) {
1094         is->seek_pos = pos;
1095         is->seek_rel = rel;
1096         is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1097         if (seek_by_bytes)
1098             is->seek_flags |= AVSEEK_FLAG_BYTE;
1099         is->seek_req = 1;
1100     }
1101 }
1102
1103 /* pause or resume the video */
1104 static void stream_pause(VideoState *is)
1105 {
1106     if (is->paused) {
1107         is->frame_timer += av_gettime() / 1000000.0 + is->video_current_pts_drift - is->video_current_pts;
1108         if(is->read_pause_return != AVERROR(ENOSYS)){
1109             is->video_current_pts = is->video_current_pts_drift + av_gettime() / 1000000.0;
1110         }
1111         is->video_current_pts_drift = is->video_current_pts - av_gettime() / 1000000.0;
1112     }
1113     is->paused = !is->paused;
1114 }
1115
1116 static double compute_target_time(double frame_current_pts, VideoState *is)
1117 {
1118     double delay, sync_threshold, diff;
1119
1120     /* compute nominal delay */
1121     delay = frame_current_pts - is->frame_last_pts;
1122     if (delay <= 0 || delay >= 10.0) {
1123         /* if incorrect delay, use previous one */
1124         delay = is->frame_last_delay;
1125     } else {
1126         is->frame_last_delay = delay;
1127     }
1128     is->frame_last_pts = frame_current_pts;
1129
1130     /* update delay to follow master synchronisation source */
1131     if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1132          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1133         /* if video is slave, we try to correct big delays by
1134            duplicating or deleting a frame */
1135         diff = get_video_clock(is) - get_master_clock(is);
1136
1137         /* skip or repeat frame. We take into account the
1138            delay to compute the threshold. I still don't know
1139            if it is the best guess */
1140         sync_threshold = FFMAX(AV_SYNC_THRESHOLD, delay);
1141         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1142             if (diff <= -sync_threshold)
1143                 delay = 0;
1144             else if (diff >= sync_threshold)
1145                 delay = 2 * delay;
1146         }
1147     }
1148     is->frame_timer += delay;
1149 #if defined(DEBUG_SYNC)
1150     printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1151             delay, actual_delay, frame_current_pts, -diff);
1152 #endif
1153
1154     return is->frame_timer;
1155 }
1156
1157 /* called to display each frame */
1158 static void video_refresh_timer(void *opaque)
1159 {
1160     VideoState *is = opaque;
1161     VideoPicture *vp;
1162
1163     SubPicture *sp, *sp2;
1164
1165     if (is->video_st) {
1166 retry:
1167         if (is->pictq_size == 0) {
1168             // nothing to do, no picture to display in the queue
1169         } else {
1170             double time= av_gettime()/1000000.0;
1171             double next_target;
1172             /* dequeue the picture */
1173             vp = &is->pictq[is->pictq_rindex];
1174
1175             if(time < vp->target_clock)
1176                 return;
1177             /* update current video pts */
1178             is->video_current_pts = vp->pts;
1179             is->video_current_pts_drift = is->video_current_pts - time;
1180             is->video_current_pos = vp->pos;
1181             if(is->pictq_size > 1){
1182                 VideoPicture *nextvp= &is->pictq[(is->pictq_rindex+1)%VIDEO_PICTURE_QUEUE_SIZE];
1183                 assert(nextvp->target_clock >= vp->target_clock);
1184                 next_target= nextvp->target_clock;
1185             }else{
1186                 next_target= vp->target_clock + is->video_clock - vp->pts; //FIXME pass durations cleanly
1187             }
1188             if(framedrop && time > next_target){
1189                 is->skip_frames *= 1.0 + FRAME_SKIP_FACTOR;
1190                 if(is->pictq_size > 1 || time > next_target + 0.5){
1191                     /* update queue size and signal for next picture */
1192                     if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1193                         is->pictq_rindex = 0;
1194
1195                     SDL_LockMutex(is->pictq_mutex);
1196                     is->pictq_size--;
1197                     SDL_CondSignal(is->pictq_cond);
1198                     SDL_UnlockMutex(is->pictq_mutex);
1199                     goto retry;
1200                 }
1201             }
1202
1203             if(is->subtitle_st) {
1204                 if (is->subtitle_stream_changed) {
1205                     SDL_LockMutex(is->subpq_mutex);
1206
1207                     while (is->subpq_size) {
1208                         free_subpicture(&is->subpq[is->subpq_rindex]);
1209
1210                         /* update queue size and signal for next picture */
1211                         if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1212                             is->subpq_rindex = 0;
1213
1214                         is->subpq_size--;
1215                     }
1216                     is->subtitle_stream_changed = 0;
1217
1218                     SDL_CondSignal(is->subpq_cond);
1219                     SDL_UnlockMutex(is->subpq_mutex);
1220                 } else {
1221                     if (is->subpq_size > 0) {
1222                         sp = &is->subpq[is->subpq_rindex];
1223
1224                         if (is->subpq_size > 1)
1225                             sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1226                         else
1227                             sp2 = NULL;
1228
1229                         if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1230                                 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1231                         {
1232                             free_subpicture(sp);
1233
1234                             /* update queue size and signal for next picture */
1235                             if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1236                                 is->subpq_rindex = 0;
1237
1238                             SDL_LockMutex(is->subpq_mutex);
1239                             is->subpq_size--;
1240                             SDL_CondSignal(is->subpq_cond);
1241                             SDL_UnlockMutex(is->subpq_mutex);
1242                         }
1243                     }
1244                 }
1245             }
1246
1247             /* display picture */
1248             video_display(is);
1249
1250             /* update queue size and signal for next picture */
1251             if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1252                 is->pictq_rindex = 0;
1253
1254             SDL_LockMutex(is->pictq_mutex);
1255             is->pictq_size--;
1256             SDL_CondSignal(is->pictq_cond);
1257             SDL_UnlockMutex(is->pictq_mutex);
1258         }
1259     } else if (is->audio_st) {
1260         /* draw the next audio frame */
1261
1262         /* if only audio stream, then display the audio bars (better
1263            than nothing, just to test the implementation) */
1264
1265         /* display picture */
1266         video_display(is);
1267     }
1268     if (show_status) {
1269         static int64_t last_time;
1270         int64_t cur_time;
1271         int aqsize, vqsize, sqsize;
1272         double av_diff;
1273
1274         cur_time = av_gettime();
1275         if (!last_time || (cur_time - last_time) >= 30000) {
1276             aqsize = 0;
1277             vqsize = 0;
1278             sqsize = 0;
1279             if (is->audio_st)
1280                 aqsize = is->audioq.size;
1281             if (is->video_st)
1282                 vqsize = is->videoq.size;
1283             if (is->subtitle_st)
1284                 sqsize = is->subtitleq.size;
1285             av_diff = 0;
1286             if (is->audio_st && is->video_st)
1287                 av_diff = get_audio_clock(is) - get_video_clock(is);
1288             printf("%7.2f A-V:%7.3f s:%3.1f aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64"   \r",
1289                    get_master_clock(is), av_diff, FFMAX(is->skip_frames-1, 0), aqsize / 1024, vqsize / 1024, sqsize, is->faulty_dts, is->faulty_pts);
1290             fflush(stdout);
1291             last_time = cur_time;
1292         }
1293     }
1294 }
1295
1296 /* allocate a picture (this needs to be done in the main thread to avoid
1297    potential locking problems) */
1298 static void alloc_picture(void *opaque)
1299 {
1300     VideoState *is = opaque;
1301     VideoPicture *vp;
1302
1303     vp = &is->pictq[is->pictq_windex];
1304
1305     if (vp->bmp)
1306         SDL_FreeYUVOverlay(vp->bmp);
1307
1308 #if CONFIG_AVFILTER
1309     if (vp->picref)
1310         avfilter_unref_pic(vp->picref);
1311     vp->picref = NULL;
1312
1313     vp->width   = is->out_video_filter->inputs[0]->w;
1314     vp->height  = is->out_video_filter->inputs[0]->h;
1315     vp->pix_fmt = is->out_video_filter->inputs[0]->format;
1316 #else
1317     vp->width   = is->video_st->codec->width;
1318     vp->height  = is->video_st->codec->height;
1319     vp->pix_fmt = is->video_st->codec->pix_fmt;
1320 #endif
1321
1322     vp->bmp = SDL_CreateYUVOverlay(vp->width, vp->height,
1323                                    SDL_YV12_OVERLAY,
1324                                    screen);
1325
1326     SDL_LockMutex(is->pictq_mutex);
1327     vp->allocated = 1;
1328     SDL_CondSignal(is->pictq_cond);
1329     SDL_UnlockMutex(is->pictq_mutex);
1330 }
1331
1332 /**
1333  *
1334  * @param pts the dts of the packet / pts of the frame; guessed if not known
1335  */
1336 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t pos)
1337 {
1338     VideoPicture *vp;
1339     int dst_pix_fmt;
1340 #if CONFIG_AVFILTER
1341     AVPicture pict_src;
1342 #endif
1343     /* wait until we have space to put a new picture */
1344     SDL_LockMutex(is->pictq_mutex);
1345
1346     if(is->pictq_size>=VIDEO_PICTURE_QUEUE_SIZE && !is->refresh)
1347         is->skip_frames= FFMAX(1.0 - FRAME_SKIP_FACTOR, is->skip_frames * (1.0-FRAME_SKIP_FACTOR));
1348
1349     while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1350            !is->videoq.abort_request) {
1351         SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1352     }
1353     SDL_UnlockMutex(is->pictq_mutex);
1354
1355     if (is->videoq.abort_request)
1356         return -1;
1357
1358     vp = &is->pictq[is->pictq_windex];
1359
1360     /* alloc or resize hardware picture buffer */
1361     if (!vp->bmp ||
1362 #if CONFIG_AVFILTER
1363         vp->width  != is->out_video_filter->inputs[0]->w ||
1364         vp->height != is->out_video_filter->inputs[0]->h) {
1365 #else
1366         vp->width != is->video_st->codec->width ||
1367         vp->height != is->video_st->codec->height) {
1368 #endif
1369         SDL_Event event;
1370
1371         vp->allocated = 0;
1372
1373         /* the allocation must be done in the main thread to avoid
1374            locking problems */
1375         event.type = FF_ALLOC_EVENT;
1376         event.user.data1 = is;
1377         SDL_PushEvent(&event);
1378
1379         /* wait until the picture is allocated */
1380         SDL_LockMutex(is->pictq_mutex);
1381         while (!vp->allocated && !is->videoq.abort_request) {
1382             SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1383         }
1384         SDL_UnlockMutex(is->pictq_mutex);
1385
1386         if (is->videoq.abort_request)
1387             return -1;
1388     }
1389
1390     /* if the frame is not skipped, then display it */
1391     if (vp->bmp) {
1392         AVPicture pict;
1393 #if CONFIG_AVFILTER
1394         if(vp->picref)
1395             avfilter_unref_pic(vp->picref);
1396         vp->picref = src_frame->opaque;
1397 #endif
1398
1399         /* get a pointer to the bitmap */
1400         SDL_LockYUVOverlay (vp->bmp);
1401
1402         dst_pix_fmt = PIX_FMT_YUV420P;
1403         memset(&pict,0,sizeof(AVPicture));
1404         pict.data[0] = vp->bmp->pixels[0];
1405         pict.data[1] = vp->bmp->pixels[2];
1406         pict.data[2] = vp->bmp->pixels[1];
1407
1408         pict.linesize[0] = vp->bmp->pitches[0];
1409         pict.linesize[1] = vp->bmp->pitches[2];
1410         pict.linesize[2] = vp->bmp->pitches[1];
1411
1412 #if CONFIG_AVFILTER
1413         pict_src.data[0] = src_frame->data[0];
1414         pict_src.data[1] = src_frame->data[1];
1415         pict_src.data[2] = src_frame->data[2];
1416
1417         pict_src.linesize[0] = src_frame->linesize[0];
1418         pict_src.linesize[1] = src_frame->linesize[1];
1419         pict_src.linesize[2] = src_frame->linesize[2];
1420
1421         //FIXME use direct rendering
1422         av_picture_copy(&pict, &pict_src,
1423                         vp->pix_fmt, vp->width, vp->height);
1424 #else
1425         sws_flags = av_get_int(sws_opts, "sws_flags", NULL);
1426         is->img_convert_ctx = sws_getCachedContext(is->img_convert_ctx,
1427             vp->width, vp->height, vp->pix_fmt, vp->width, vp->height,
1428             dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1429         if (is->img_convert_ctx == NULL) {
1430             fprintf(stderr, "Cannot initialize the conversion context\n");
1431             exit(1);
1432         }
1433         sws_scale(is->img_convert_ctx, src_frame->data, src_frame->linesize,
1434                   0, vp->height, pict.data, pict.linesize);
1435 #endif
1436         /* update the bitmap content */
1437         SDL_UnlockYUVOverlay(vp->bmp);
1438
1439         vp->pts = pts;
1440         vp->pos = pos;
1441
1442         /* now we can update the picture count */
1443         if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1444             is->pictq_windex = 0;
1445         SDL_LockMutex(is->pictq_mutex);
1446         vp->target_clock= compute_target_time(vp->pts, is);
1447
1448         is->pictq_size++;
1449         SDL_UnlockMutex(is->pictq_mutex);
1450     }
1451     return 0;
1452 }
1453
1454 /**
1455  * compute the exact PTS for the picture if it is omitted in the stream
1456  * @param pts1 the dts of the pkt / pts of the frame
1457  */
1458 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1, int64_t pos)
1459 {
1460     double frame_delay, pts;
1461
1462     pts = pts1;
1463
1464     if (pts != 0) {
1465         /* update video clock with pts, if present */
1466         is->video_clock = pts;
1467     } else {
1468         pts = is->video_clock;
1469     }
1470     /* update video clock for next frame */
1471     frame_delay = av_q2d(is->video_st->codec->time_base);
1472     /* for MPEG2, the frame can be repeated, so we update the
1473        clock accordingly */
1474     frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1475     is->video_clock += frame_delay;
1476
1477 #if defined(DEBUG_SYNC) && 0
1478     printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1479            av_get_pict_type_char(src_frame->pict_type), pts, pts1);
1480 #endif
1481     return queue_picture(is, src_frame, pts, pos);
1482 }
1483
1484 static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
1485 {
1486     int len1, got_picture, i;
1487
1488         if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1489             return -1;
1490
1491         if(pkt->data == flush_pkt.data){
1492             avcodec_flush_buffers(is->video_st->codec);
1493
1494             SDL_LockMutex(is->pictq_mutex);
1495             // Make sure there are no long delay timers (ideally we should just flush the queue but that's harder)
1496             for(i=0; i<VIDEO_PICTURE_QUEUE_SIZE; i++){
1497                 is->pictq[i].target_clock= 0;
1498             }
1499             while (is->pictq_size && !is->videoq.abort_request) {
1500                 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1501             }
1502             is->video_current_pos= -1;
1503             SDL_UnlockMutex(is->pictq_mutex);
1504
1505             is->last_dts_for_fault_detection=
1506             is->last_pts_for_fault_detection= INT64_MIN;
1507             is->frame_last_pts= AV_NOPTS_VALUE;
1508             is->frame_last_delay = 0;
1509             is->frame_timer = (double)av_gettime() / 1000000.0;
1510             is->skip_frames= 1;
1511             is->skip_frames_index= 0;
1512             return 0;
1513         }
1514
1515         /* NOTE: ipts is the PTS of the _first_ picture beginning in
1516            this packet, if any */
1517         is->video_st->codec->reordered_opaque= pkt->pts;
1518         len1 = avcodec_decode_video2(is->video_st->codec,
1519                                     frame, &got_picture,
1520                                     pkt);
1521
1522         if (got_picture) {
1523             if(pkt->dts != AV_NOPTS_VALUE){
1524                 is->faulty_dts += pkt->dts <= is->last_dts_for_fault_detection;
1525                 is->last_dts_for_fault_detection= pkt->dts;
1526             }
1527             if(frame->reordered_opaque != AV_NOPTS_VALUE){
1528                 is->faulty_pts += frame->reordered_opaque <= is->last_pts_for_fault_detection;
1529                 is->last_pts_for_fault_detection= frame->reordered_opaque;
1530             }
1531         }
1532
1533         if(   (   decoder_reorder_pts==1
1534                || (decoder_reorder_pts && is->faulty_pts<is->faulty_dts)
1535                || pkt->dts == AV_NOPTS_VALUE)
1536            && frame->reordered_opaque != AV_NOPTS_VALUE)
1537             *pts= frame->reordered_opaque;
1538         else if(pkt->dts != AV_NOPTS_VALUE)
1539             *pts= pkt->dts;
1540         else
1541             *pts= 0;
1542
1543 //            if (len1 < 0)
1544 //                break;
1545     if (got_picture){
1546         is->skip_frames_index += 1;
1547         if(is->skip_frames_index >= is->skip_frames){
1548             is->skip_frames_index -= FFMAX(is->skip_frames, 1.0);
1549             return 1;
1550         }
1551
1552     }
1553     return 0;
1554 }
1555
1556 #if CONFIG_AVFILTER
1557 typedef struct {
1558     VideoState *is;
1559     AVFrame *frame;
1560 } FilterPriv;
1561
1562 static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
1563 {
1564     FilterPriv *priv = ctx->priv;
1565     if(!opaque) return -1;
1566
1567     priv->is = opaque;
1568     priv->frame = avcodec_alloc_frame();
1569
1570     return 0;
1571 }
1572
1573 static void input_uninit(AVFilterContext *ctx)
1574 {
1575     FilterPriv *priv = ctx->priv;
1576     av_free(priv->frame);
1577 }
1578
1579 static int input_request_frame(AVFilterLink *link)
1580 {
1581     FilterPriv *priv = link->src->priv;
1582     AVFilterPicRef *picref;
1583     int64_t pts = 0;
1584     AVPacket pkt;
1585     int ret;
1586
1587     while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
1588         av_free_packet(&pkt);
1589     if (ret < 0)
1590         return -1;
1591
1592     /* FIXME: until I figure out how to hook everything up to the codec
1593      * right, we're just copying the entire frame. */
1594     picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, link->w, link->h);
1595     av_picture_copy((AVPicture *)&picref->data, (AVPicture *)priv->frame,
1596                     picref->pic->format, link->w, link->h);
1597     av_free_packet(&pkt);
1598
1599     picref->pts = pts;
1600     picref->pos = pkt.pos;
1601     picref->pixel_aspect = priv->is->video_st->codec->sample_aspect_ratio;
1602     avfilter_start_frame(link, avfilter_ref_pic(picref, ~0));
1603     avfilter_draw_slice(link, 0, link->h, 1);
1604     avfilter_end_frame(link);
1605     avfilter_unref_pic(picref);
1606
1607     return 0;
1608 }
1609
1610 static int input_query_formats(AVFilterContext *ctx)
1611 {
1612     FilterPriv *priv = ctx->priv;
1613     enum PixelFormat pix_fmts[] = {
1614         priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE
1615     };
1616
1617     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1618     return 0;
1619 }
1620
1621 static int input_config_props(AVFilterLink *link)
1622 {
1623     FilterPriv *priv  = link->src->priv;
1624     AVCodecContext *c = priv->is->video_st->codec;
1625
1626     link->w = c->width;
1627     link->h = c->height;
1628
1629     return 0;
1630 }
1631
1632 static AVFilter input_filter =
1633 {
1634     .name      = "ffplay_input",
1635
1636     .priv_size = sizeof(FilterPriv),
1637
1638     .init      = input_init,
1639     .uninit    = input_uninit,
1640
1641     .query_formats = input_query_formats,
1642
1643     .inputs    = (AVFilterPad[]) {{ .name = NULL }},
1644     .outputs   = (AVFilterPad[]) {{ .name = "default",
1645                                     .type = CODEC_TYPE_VIDEO,
1646                                     .request_frame = input_request_frame,
1647                                     .config_props  = input_config_props, },
1648                                   { .name = NULL }},
1649 };
1650
1651 static void output_end_frame(AVFilterLink *link)
1652 {
1653 }
1654
1655 static int output_query_formats(AVFilterContext *ctx)
1656 {
1657     enum PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
1658
1659     avfilter_set_common_formats(ctx, avfilter_make_format_list(pix_fmts));
1660     return 0;
1661 }
1662
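/* Pull one filtered picture from the graph sink. The AVFrame only borrows the
 * data/linesize pointers of the picture reference, which is handed back to the
 * caller through frame->opaque so it can be unreferenced after display. */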
1663 static int get_filtered_video_frame(AVFilterContext *ctx, AVFrame *frame,
1664                                     int64_t *pts, int64_t *pos)
1665 {
1666     AVFilterPicRef *pic;
1667
1668     if(avfilter_request_frame(ctx->inputs[0]))
1669         return -1;
1670     if(!(pic = ctx->inputs[0]->cur_pic))
1671         return -1;
1672     ctx->inputs[0]->cur_pic = NULL;
1673
1674     frame->opaque = pic;
1675     *pts          = pic->pts;
1676     *pos          = pic->pos;
1677
1678     memcpy(frame->data,     pic->data,     sizeof(frame->data));
1679     memcpy(frame->linesize, pic->linesize, sizeof(frame->linesize));
1680
1681     return 1;
1682 }
1683
1684 static AVFilter output_filter =
1685 {
1686     .name      = "ffplay_output",
1687
1688     .query_formats = output_query_formats,
1689
1690     .inputs    = (AVFilterPad[]) {{ .name          = "default",
1691                                     .type          = CODEC_TYPE_VIDEO,
1692                                     .end_frame     = output_end_frame,
1693                                     .min_perms     = AV_PERM_READ, },
1694                                   { .name = NULL }},
1695     .outputs   = (AVFilterPad[]) {{ .name = NULL }},
1696 };
1697 #endif  /* CONFIG_AVFILTER */
1698
1699 static int video_thread(void *arg)
1700 {
1701     VideoState *is = arg;
1702     AVFrame *frame= avcodec_alloc_frame();
1703     int64_t pts_int;
1704     double pts;
1705     int ret;
1706
1707 #if CONFIG_AVFILTER
1708     int64_t pos;
1709     AVFilterContext *filt_src = NULL, *filt_out = NULL;
1710     AVFilterGraph *graph = av_mallocz(sizeof(AVFilterGraph));
1711     graph->scale_sws_opts = av_strdup("sws_flags=bilinear");
1712
1713     if(!(filt_src = avfilter_open(&input_filter,  "src")))   goto the_end;
1714     if(!(filt_out = avfilter_open(&output_filter, "out")))   goto the_end;
1715
1716     if(avfilter_init_filter(filt_src, NULL, is))             goto the_end;
1717     if(avfilter_init_filter(filt_out, NULL, frame))          goto the_end;
1718
1719
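    /* Build the graph: if the user supplied -vfilters, parse that chain and
       splice it between our source and sink (labelled "in" and "out"),
       otherwise link the source directly to the sink. */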
1720     if(vfilters) {
1721         AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
1722         AVFilterInOut *inputs  = av_malloc(sizeof(AVFilterInOut));
1723
1724         outputs->name    = av_strdup("in");
1725         outputs->filter  = filt_src;
1726         outputs->pad_idx = 0;
1727         outputs->next    = NULL;
1728
1729         inputs->name    = av_strdup("out");
1730         inputs->filter  = filt_out;
1731         inputs->pad_idx = 0;
1732         inputs->next    = NULL;
1733
1734         if (avfilter_graph_parse(graph, vfilters, inputs, outputs, NULL) < 0)
1735             goto the_end;
1736         av_freep(&vfilters);
1737     } else {
1738         if(avfilter_link(filt_src, 0, filt_out, 0) < 0)          goto the_end;
1739     }
1740     avfilter_graph_add_filter(graph, filt_src);
1741     avfilter_graph_add_filter(graph, filt_out);
1742
1743     if(avfilter_graph_check_validity(graph, NULL))           goto the_end;
1744     if(avfilter_graph_config_formats(graph, NULL))           goto the_end;
1745     if(avfilter_graph_config_links(graph, NULL))             goto the_end;
1746
1747     is->out_video_filter = filt_out;
1748 #endif
1749
1750     for(;;) {
1751 #if !CONFIG_AVFILTER
1752         AVPacket pkt;
1753 #endif
1754         while (is->paused && !is->videoq.abort_request)
1755             SDL_Delay(10);
1756 #if CONFIG_AVFILTER
1757         ret = get_filtered_video_frame(filt_out, frame, &pts_int, &pos);
1758 #else
1759         ret = get_video_frame(is, frame, &pts_int, &pkt);
1760 #endif
1761
1762         if (ret < 0) goto the_end;
1763
1764         if (!ret)
1765             continue;
1766
1767         pts = pts_int*av_q2d(is->video_st->time_base);
1768
1769 #if CONFIG_AVFILTER
1770         ret = output_picture2(is, frame, pts, pos);
1771 #else
1772         ret = output_picture2(is, frame, pts,  pkt.pos);
1773         av_free_packet(&pkt);
1774 #endif
1775         if (ret < 0)
1776             goto the_end;
1777
1778         if (step)
1779             if (cur_stream)
1780                 stream_pause(cur_stream);
1781     }
1782  the_end:
1783 #if CONFIG_AVFILTER
1784     avfilter_graph_destroy(graph);
1785     av_freep(&graph);
1786 #endif
1787     av_free(frame);
1788     return 0;
1789 }
1790
1791 static int subtitle_thread(void *arg)
1792 {
1793     VideoState *is = arg;
1794     SubPicture *sp;
1795     AVPacket pkt1, *pkt = &pkt1;
1796     int len1, got_subtitle;
1797     double pts;
1798     int i, j;
1799     int r, g, b, y, u, v, a;
1800
1801     for(;;) {
1802         while (is->paused && !is->subtitleq.abort_request) {
1803             SDL_Delay(10);
1804         }
1805         if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1806             break;
1807
1808         if(pkt->data == flush_pkt.data){
1809             avcodec_flush_buffers(is->subtitle_st->codec);
1810             continue;
1811         }
1812         SDL_LockMutex(is->subpq_mutex);
1813         while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1814                !is->subtitleq.abort_request) {
1815             SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1816         }
1817         SDL_UnlockMutex(is->subpq_mutex);
1818
1819         if (is->subtitleq.abort_request)
1820             goto the_end;
1821
1822         sp = &is->subpq[is->subpq_windex];
1823
1824         /* NOTE: pts is the PTS of the _first_ subtitle beginning in
1825            this packet, if any */
1826         pts = 0;
1827         if (pkt->pts != AV_NOPTS_VALUE)
1828             pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1829
1830         len1 = avcodec_decode_subtitle2(is->subtitle_st->codec,
1831                                     &sp->sub, &got_subtitle,
1832                                     pkt);
1833 //            if (len1 < 0)
1834 //                break;
1835         if (got_subtitle && sp->sub.format == 0) {
1836             sp->pts = pts;
1837
1838             for (i = 0; i < sp->sub.num_rects; i++)
1839             {
1840                 for (j = 0; j < sp->sub.rects[i]->nb_colors; j++)
1841                 {
1842                     RGBA_IN(r, g, b, a, (uint32_t*)sp->sub.rects[i]->pict.data[1] + j);
1843                     y = RGB_TO_Y_CCIR(r, g, b);
1844                     u = RGB_TO_U_CCIR(r, g, b, 0);
1845                     v = RGB_TO_V_CCIR(r, g, b, 0);
1846                     YUVA_OUT((uint32_t*)sp->sub.rects[i]->pict.data[1] + j, y, u, v, a);
1847                 }
1848             }
1849
1850             /* now we can update the picture count */
1851             if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1852                 is->subpq_windex = 0;
1853             SDL_LockMutex(is->subpq_mutex);
1854             is->subpq_size++;
1855             SDL_UnlockMutex(is->subpq_mutex);
1856         }
1857         av_free_packet(pkt);
1858 //        if (step)
1859 //            if (cur_stream)
1860 //                stream_pause(cur_stream);
1861     }
1862  the_end:
1863     return 0;
1864 }
1865
1866 /* copy decoded samples into the circular sample array used by the audio wave/spectrum display */
1867 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1868 {
1869     int size, len, channels;
1870
1871     channels = is->audio_st->codec->channels;
1872
1873     size = samples_size / sizeof(short);
1874     while (size > 0) {
1875         len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1876         if (len > size)
1877             len = size;
1878         memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1879         samples += len;
1880         is->sample_array_index += len;
1881         if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1882             is->sample_array_index = 0;
1883         size -= len;
1884     }
1885 }
1886
1887 /* return the new audio buffer size (samples can be added or deleted
1888    to improve sync when video or the external clock is the master) */
1889 static int synchronize_audio(VideoState *is, short *samples,
1890                              int samples_size1, double pts)
1891 {
1892     int n, samples_size;
1893     double ref_clock;
1894
1895     n = 2 * is->audio_st->codec->channels;
1896     samples_size = samples_size1;
1897
1898     /* if not master, then we try to remove or add samples to correct the clock */
1899     if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1900          is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1901         double diff, avg_diff;
1902         int wanted_size, min_size, max_size, nb_samples;
1903
1904         ref_clock = get_master_clock(is);
1905         diff = get_audio_clock(is) - ref_clock;
1906
1907         if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1908             is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1909             if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1910                 /* not enough measurements yet for a reliable estimate */
1911                 is->audio_diff_avg_count++;
1912             } else {
1913                 /* estimate the A-V difference */
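                /* audio_diff_cum is a geometric accumulator
                   (cum = diff + coef * cum), so multiplying by (1 - coef)
                   yields an exponentially weighted average of the recent
                   differences; coef is chosen in stream_component_open() so
                   that roughly the last AUDIO_DIFF_AVG_NB values dominate */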
1914                 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1915
1916                 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1917                     wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1918                     nb_samples = samples_size / n;
1919
1920                     min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1921                     max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1922                     if (wanted_size < min_size)
1923                         wanted_size = min_size;
1924                     else if (wanted_size > max_size)
1925                         wanted_size = max_size;
1926
1927                     /* add or remove samples to correct the synchronization */
1928                     if (wanted_size < samples_size) {
1929                         /* remove samples */
1930                         samples_size = wanted_size;
1931                     } else if (wanted_size > samples_size) {
1932                         uint8_t *samples_end, *q;
1933                         int nb;
1934
1935                         /* add samples by duplicating the last one */
1936                         nb = (wanted_size - samples_size);
1937                         samples_end = (uint8_t *)samples + samples_size - n;
1938                         q = samples_end + n;
1939                         while (nb > 0) {
1940                             memcpy(q, samples_end, n);
1941                             q += n;
1942                             nb -= n;
1943                         }
1944                         samples_size = wanted_size;
1945                     }
1946                 }
1947 #if 0
1948                 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1949                        diff, avg_diff, samples_size - samples_size1,
1950                        is->audio_clock, is->video_clock, is->audio_diff_threshold);
1951 #endif
1952             }
1953         } else {
1954             /* difference is too large: probably initial PTS errors, so
1955                reset the A-V filter */
1956             is->audio_diff_avg_count = 0;
1957             is->audio_diff_cum = 0;
1958         }
1959     }
1960
1961     return samples_size;
1962 }
1963
1964 /* decode one audio frame and return its uncompressed size */
1965 static int audio_decode_frame(VideoState *is, double *pts_ptr)
1966 {
1967     AVPacket *pkt_temp = &is->audio_pkt_temp;
1968     AVPacket *pkt = &is->audio_pkt;
1969     AVCodecContext *dec= is->audio_st->codec;
1970     int n, len1, data_size;
1971     double pts;
1972
1973     for(;;) {
1974         /* NOTE: the audio packet can contain several frames */
1975         while (pkt_temp->size > 0) {
1976             data_size = sizeof(is->audio_buf1);
1977             len1 = avcodec_decode_audio3(dec,
1978                                         (int16_t *)is->audio_buf1, &data_size,
1979                                         pkt_temp);
1980             if (len1 < 0) {
1981                 /* if error, we skip the frame */
1982                 pkt_temp->size = 0;
1983                 break;
1984             }
1985
1986             pkt_temp->data += len1;
1987             pkt_temp->size -= len1;
1988             if (data_size <= 0)
1989                 continue;
1990
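            /* SDL is fed signed 16-bit samples: if the decoder outputs a
               different sample format, (re)create a converter whenever the
               source format changes and convert into audio_buf2 */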
1991             if (dec->sample_fmt != is->audio_src_fmt) {
1992                 if (is->reformat_ctx)
1993                     av_audio_convert_free(is->reformat_ctx);
1994                 is->reformat_ctx= av_audio_convert_alloc(SAMPLE_FMT_S16, 1,
1995                                                          dec->sample_fmt, 1, NULL, 0);
1996                 if (!is->reformat_ctx) {
1997                     fprintf(stderr, "Cannot convert %s sample format to %s sample format\n",
1998                         avcodec_get_sample_fmt_name(dec->sample_fmt),
1999                         avcodec_get_sample_fmt_name(SAMPLE_FMT_S16));
2000                     break;
2001                 }
2002                 is->audio_src_fmt= dec->sample_fmt;
2003             }
2004
2005             if (is->reformat_ctx) {
2006                 const void *ibuf[6]= {is->audio_buf1};
2007                 void *obuf[6]= {is->audio_buf2};
2008                 int istride[6]= {av_get_bits_per_sample_format(dec->sample_fmt)/8};
2009                 int ostride[6]= {2};
2010                 int len= data_size/istride[0];
2011                 if (av_audio_convert(is->reformat_ctx, obuf, ostride, ibuf, istride, len)<0) {
2012                     printf("av_audio_convert() failed\n");
2013                     break;
2014                 }
2015                 is->audio_buf= is->audio_buf2;
2016                 /* FIXME: existing code assumes that data_size equals framesize*channels*2;
2017                           remove this legacy cruft */
2018                 data_size= len*2;
2019             }else{
2020                 is->audio_buf= is->audio_buf1;
2021             }
2022
2023             /* no packet pts here: derive the pts from the running audio clock */
2024             pts = is->audio_clock;
2025             *pts_ptr = pts;
2026             n = 2 * dec->channels;
2027             is->audio_clock += (double)data_size /
2028                 (double)(n * dec->sample_rate);
2029 #if defined(DEBUG_SYNC)
2030             {
2031                 static double last_clock;
2032                 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
2033                        is->audio_clock - last_clock,
2034                        is->audio_clock, pts);
2035                 last_clock = is->audio_clock;
2036             }
2037 #endif
2038             return data_size;
2039         }
2040
2041         /* free the current packet */
2042         if (pkt->data)
2043             av_free_packet(pkt);
2044
2045         if (is->paused || is->audioq.abort_request) {
2046             return -1;
2047         }
2048
2049         /* read next packet */
2050         if (packet_queue_get(&is->audioq, pkt, 1) < 0)
2051             return -1;
2052         if(pkt->data == flush_pkt.data){
2053             avcodec_flush_buffers(dec);
2054             continue;
2055         }
2056
2057         pkt_temp->data = pkt->data;
2058         pkt_temp->size = pkt->size;
2059
2060         /* update the audio clock with the packet pts, if available */
2061         if (pkt->pts != AV_NOPTS_VALUE) {
2062             is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
2063         }
2064     }
2065 }
2066
2067 /* get the current audio output buffer size, in bytes. With SDL, we
2068    cannot have precise information */
2069 static int audio_write_get_buf_size(VideoState *is)
2070 {
2071     return is->audio_buf_size - is->audio_buf_index;
2072 }
2073
2074
2075 /* prepare a new audio buffer */
2076 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2077 {
2078     VideoState *is = opaque;
2079     int audio_size, len1;
2080     double pts;
2081
2082     audio_callback_time = av_gettime();
2083
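    /* this callback runs in SDL's audio thread: keep decoding until the
       requested number of bytes has been written, and fall back to a short
       block of silence when decoding fails so the device never starves */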
2084     while (len > 0) {
2085         if (is->audio_buf_index >= is->audio_buf_size) {
2086             audio_size = audio_decode_frame(is, &pts);
2087             if (audio_size < 0) {
2088                 /* if error, just output silence */
2089                 is->audio_buf = is->audio_buf1;
2090                 is->audio_buf_size = 1024;
2091                 memset(is->audio_buf, 0, is->audio_buf_size);
2092             } else {
2093                 if (is->show_audio)
2094                     update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2095                 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
2096                                                pts);
2097                 is->audio_buf_size = audio_size;
2098             }
2099             is->audio_buf_index = 0;
2100         }
2101         len1 = is->audio_buf_size - is->audio_buf_index;
2102         if (len1 > len)
2103             len1 = len;
2104         memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2105         len -= len1;
2106         stream += len1;
2107         is->audio_buf_index += len1;
2108     }
2109 }
2110
2111 /* open a given stream. Return 0 if OK */
2112 static int stream_component_open(VideoState *is, int stream_index)
2113 {
2114     AVFormatContext *ic = is->ic;
2115     AVCodecContext *avctx;
2116     AVCodec *codec;
2117     SDL_AudioSpec wanted_spec, spec;
2118
2119     if (stream_index < 0 || stream_index >= ic->nb_streams)
2120         return -1;
2121     avctx = ic->streams[stream_index]->codec;
2122
2123     /* for audio, request a downmix to at most two channels */
2124     if (avctx->codec_type == CODEC_TYPE_AUDIO) {
2125         if (avctx->channels > 0) {
2126             avctx->request_channels = FFMIN(2, avctx->channels);
2127         } else {
2128             avctx->request_channels = 2;
2129         }
2130     }
2131
2132     codec = avcodec_find_decoder(avctx->codec_id);
2133     avctx->debug_mv = debug_mv;
2134     avctx->debug = debug;
2135     avctx->workaround_bugs = workaround_bugs;
2136     avctx->lowres = lowres;
2137     if(lowres) avctx->flags |= CODEC_FLAG_EMU_EDGE;
2138     avctx->idct_algo= idct;
2139     if(fast) avctx->flags2 |= CODEC_FLAG2_FAST;
2140     avctx->skip_frame= skip_frame;
2141     avctx->skip_idct= skip_idct;
2142     avctx->skip_loop_filter= skip_loop_filter;
2143     avctx->error_recognition= error_recognition;
2144     avctx->error_concealment= error_concealment;
2145     avcodec_thread_init(avctx, thread_count);
2146
2147     set_context_opts(avctx, avcodec_opts[avctx->codec_type], 0);
2148
2149     if (!codec ||
2150         avcodec_open(avctx, codec) < 0)
2151         return -1;
2152
2153     /* prepare audio output */
2154     if (avctx->codec_type == CODEC_TYPE_AUDIO) {
2155         wanted_spec.freq = avctx->sample_rate;
2156         wanted_spec.format = AUDIO_S16SYS;
2157         wanted_spec.channels = avctx->channels;
2158         wanted_spec.silence = 0;
2159         wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
2160         wanted_spec.callback = sdl_audio_callback;
2161         wanted_spec.userdata = is;
2162         if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
2163             fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
2164             return -1;
2165         }
2166         is->audio_hw_buf_size = spec.size;
2167         is->audio_src_fmt= SAMPLE_FMT_S16;
2168     }
2169
2170     ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2171     switch(avctx->codec_type) {
2172     case CODEC_TYPE_AUDIO:
2173         is->audio_stream = stream_index;
2174         is->audio_st = ic->streams[stream_index];
2175         is->audio_buf_size = 0;
2176         is->audio_buf_index = 0;
2177
2178         /* init averaging filter */
2179         is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
2180         is->audio_diff_avg_count = 0;
2181         /* since we do not have a precise enough measure of the audio FIFO
2182            fullness, we correct audio sync only for errors larger than this threshold */
2183         is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / avctx->sample_rate;
2184
2185         memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
2186         packet_queue_init(&is->audioq);
2187         SDL_PauseAudio(0);
2188         break;
2189     case CODEC_TYPE_VIDEO:
2190         is->video_stream = stream_index;
2191         is->video_st = ic->streams[stream_index];
2192
2193 //        is->video_current_pts_time = av_gettime();
2194
2195         packet_queue_init(&is->videoq);
2196         is->video_tid = SDL_CreateThread(video_thread, is);
2197         break;
2198     case CODEC_TYPE_SUBTITLE:
2199         is->subtitle_stream = stream_index;
2200         is->subtitle_st = ic->streams[stream_index];
2201         packet_queue_init(&is->subtitleq);
2202
2203         is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
2204         break;
2205     default:
2206         break;
2207     }
2208     return 0;
2209 }
2210
2211 static void stream_component_close(VideoState *is, int stream_index)
2212 {
2213     AVFormatContext *ic = is->ic;
2214     AVCodecContext *avctx;
2215
2216     if (stream_index < 0 || stream_index >= ic->nb_streams)
2217         return;
2218     avctx = ic->streams[stream_index]->codec;
2219
2220     switch(avctx->codec_type) {
2221     case CODEC_TYPE_AUDIO:
2222         packet_queue_abort(&is->audioq);
2223
2224         SDL_CloseAudio();
2225
2226         packet_queue_end(&is->audioq);
2227         if (is->reformat_ctx)
2228             av_audio_convert_free(is->reformat_ctx);
2229         is->reformat_ctx = NULL;
2230         break;
2231     case CODEC_TYPE_VIDEO:
2232         packet_queue_abort(&is->videoq);
2233
2234         /* note: we also signal the condition variable to make sure we
2235            unblock the video thread in all cases */
2236         SDL_LockMutex(is->pictq_mutex);
2237         SDL_CondSignal(is->pictq_cond);
2238         SDL_UnlockMutex(is->pictq_mutex);
2239
2240         SDL_WaitThread(is->video_tid, NULL);
2241
2242         packet_queue_end(&is->videoq);
2243         break;
2244     case CODEC_TYPE_SUBTITLE:
2245         packet_queue_abort(&is->subtitleq);
2246
2247         /* note: we also signal the condition variable to make sure we
2248            unblock the subtitle thread in all cases */
2249         SDL_LockMutex(is->subpq_mutex);
2250         is->subtitle_stream_changed = 1;
2251
2252         SDL_CondSignal(is->subpq_cond);
2253         SDL_UnlockMutex(is->subpq_mutex);
2254
2255         SDL_WaitThread(is->subtitle_tid, NULL);
2256
2257         packet_queue_end(&is->subtitleq);
2258         break;
2259     default:
2260         break;
2261     }
2262
2263     ic->streams[stream_index]->discard = AVDISCARD_ALL;
2264     avcodec_close(avctx);
2265     switch(avctx->codec_type) {
2266     case CODEC_TYPE_AUDIO:
2267         is->audio_st = NULL;
2268         is->audio_stream = -1;
2269         break;
2270     case CODEC_TYPE_VIDEO:
2271         is->video_st = NULL;
2272         is->video_stream = -1;
2273         break;
2274     case CODEC_TYPE_SUBTITLE:
2275         is->subtitle_st = NULL;
2276         is->subtitle_stream = -1;
2277         break;
2278     default:
2279         break;
2280     }
2281 }
2282
2283 /* since we have only one decoding thread, we can use a global
2284    variable instead of a thread local variable */
2285 static VideoState *global_video_state;
2286
2287 static int decode_interrupt_cb(void)
2288 {
2289     return (global_video_state && global_video_state->abort_request);
2290 }
2291
2292 /* this thread gets the stream from the disk or the network */
2293 static int decode_thread(void *arg)
2294 {
2295     VideoState *is = arg;
2296     AVFormatContext *ic;
2297     int err, i, ret;
2298     int st_index[CODEC_TYPE_NB];
2299     int st_count[CODEC_TYPE_NB]={0};
2300     int st_best_packet_count[CODEC_TYPE_NB];
2301     AVPacket pkt1, *pkt = &pkt1;
2302     AVFormatParameters params, *ap = &params;
2303     int eof=0;
2304
2305     ic = avformat_alloc_context();
2306
2307     memset(st_index, -1, sizeof(st_index));
2308     memset(st_best_packet_count, -1, sizeof(st_best_packet_count));
2309     is->video_stream = -1;
2310     is->audio_stream = -1;
2311     is->subtitle_stream = -1;
2312
2313     global_video_state = is;
2314     url_set_interrupt_cb(decode_interrupt_cb);
2315
2316     memset(ap, 0, sizeof(*ap));
2317
2318     ap->prealloced_context = 1;
2319     ap->width = frame_width;
2320     ap->height= frame_height;
2321     ap->time_base= (AVRational){1, 25};
2322     ap->pix_fmt = frame_pix_fmt;
2323
2324     set_context_opts(ic, avformat_opts, AV_OPT_FLAG_DECODING_PARAM);
2325
2326     err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
2327     if (err < 0) {
2328         print_error(is->filename, err);
2329         ret = -1;
2330         goto fail;
2331     }
2332     is->ic = ic;
2333
2334     if(genpts)
2335         ic->flags |= AVFMT_FLAG_GENPTS;
2336
2337     err = av_find_stream_info(ic);
2338     if (err < 0) {
2339         fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
2340         ret = -1;
2341         goto fail;
2342     }
2343     if(ic->pb)
2344         ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
2345
2346     if(seek_by_bytes<0)
2347         seek_by_bytes= !!(ic->iformat->flags & AVFMT_TS_DISCONT);
2348
2349     /* if seeking was requested, execute it */
2350     if (start_time != AV_NOPTS_VALUE) {
2351         int64_t timestamp;
2352
2353         timestamp = start_time;
2354         /* add the stream start time */
2355         if (ic->start_time != AV_NOPTS_VALUE)
2356             timestamp += ic->start_time;
2357         ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2358         if (ret < 0) {
2359             fprintf(stderr, "%s: could not seek to position %0.3f\n",
2360                     is->filename, (double)timestamp / AV_TIME_BASE);
2361         }
2362     }
2363
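    /* stream selection: for each media type pick the stream with the most
       frames seen during probing, unless the user forced an index with
       -ast/-vst/-sst */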
2364     for(i = 0; i < ic->nb_streams; i++) {
2365         AVStream *st= ic->streams[i];
2366         AVCodecContext *avctx = st->codec;
2367         ic->streams[i]->discard = AVDISCARD_ALL;
2368         if(avctx->codec_type >= (unsigned)CODEC_TYPE_NB)
2369             continue;
2370         if(st_count[avctx->codec_type]++ != wanted_stream[avctx->codec_type] && wanted_stream[avctx->codec_type] >= 0)
2371             continue;
2372
2373         if(st_best_packet_count[avctx->codec_type] >= st->codec_info_nb_frames)
2374             continue;
2375         st_best_packet_count[avctx->codec_type]= st->codec_info_nb_frames;
2376
2377         switch(avctx->codec_type) {
2378         case CODEC_TYPE_AUDIO:
2379             if (!audio_disable)
2380                 st_index[CODEC_TYPE_AUDIO] = i;
2381             break;
2382         case CODEC_TYPE_VIDEO:
2383         case CODEC_TYPE_SUBTITLE:
2384             if (!video_disable)
2385                 st_index[avctx->codec_type] = i;
2386             break;
2387         default:
2388             break;
2389         }
2390     }
2391     if (show_status) {
2392         dump_format(ic, 0, is->filename, 0);
2393     }
2394
2395     /* open the streams */
2396     if (st_index[CODEC_TYPE_AUDIO] >= 0) {
2397         stream_component_open(is, st_index[CODEC_TYPE_AUDIO]);
2398     }
2399
2400     ret=-1;
2401     if (st_index[CODEC_TYPE_VIDEO] >= 0) {
2402         ret= stream_component_open(is, st_index[CODEC_TYPE_VIDEO]);
2403     }
2404     is->refresh_tid = SDL_CreateThread(refresh_thread, is);
2405     if(ret<0) {
2406         if (!display_disable)
2407             is->show_audio = 2;
2408     }
2409
2410     if (st_index[CODEC_TYPE_SUBTITLE] >= 0) {
2411         stream_component_open(is, st_index[CODEC_TYPE_SUBTITLE]);
2412     }
2413
2414     if (is->video_stream < 0 && is->audio_stream < 0) {
2415         fprintf(stderr, "%s: could not open codecs\n", is->filename);
2416         ret = -1;
2417         goto fail;
2418     }
2419
2420     for(;;) {
2421         if (is->abort_request)
2422             break;
2423         if (is->paused != is->last_paused) {
2424             is->last_paused = is->paused;
2425             if (is->paused)
2426                 is->read_pause_return= av_read_pause(ic);
2427             else
2428                 av_read_play(ic);
2429         }
2430 #if CONFIG_RTSP_DEMUXER
2431         if (is->paused && !strcmp(ic->iformat->name, "rtsp")) {
2432             /* wait 10 ms to avoid trying to get another packet */
2433             /* XXX: horrible */
2434             SDL_Delay(10);
2435             continue;
2436         }
2437 #endif
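        /* seeks are executed here in the demuxer thread: on success each
           packet queue is flushed and a special flush packet is queued so
           that every decoder resets its state before new data arrives */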
2438         if (is->seek_req) {
2439             int64_t seek_target= is->seek_pos;
2440             int64_t seek_min= is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2441             int64_t seek_max= is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2442 //FIXME the +-2 is because rounding is not done in the correct direction when
2443 //      generating the seek_pos/seek_rel variables
2444
2445             ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2446             if (ret < 0) {
2447                 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2448             }else{
2449                 if (is->audio_stream >= 0) {
2450                     packet_queue_flush(&is->audioq);
2451                     packet_queue_put(&is->audioq, &flush_pkt);
2452                 }
2453                 if (is->subtitle_stream >= 0) {
2454                     packet_queue_flush(&is->subtitleq);
2455                     packet_queue_put(&is->subtitleq, &flush_pkt);
2456                 }
2457                 if (is->video_stream >= 0) {
2458                     packet_queue_flush(&is->videoq);
2459                     packet_queue_put(&is->videoq, &flush_pkt);
2460                 }
2461             }
2462             is->seek_req = 0;
2463             eof= 0;
2464         }
2465
2466         /* if the queues are full, no need to read more */
2467         if (   is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2468             || (   (is->audioq   .size  > MIN_AUDIOQ_SIZE || is->audio_stream<0)
2469                 && (is->videoq   .nb_packets > MIN_FRAMES || is->video_stream<0)
2470                 && (is->subtitleq.nb_packets > MIN_FRAMES || is->subtitle_stream<0))) {
2471             /* wait 10 ms */
2472             SDL_Delay(10);
2473             continue;
2474         }
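        /* at end of file, queue an empty packet on the video queue so the
           decoder can flush its delayed frames; with -autoexit we quit once
           all queues have drained */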
2475         if(url_feof(ic->pb) || eof) {
2476             if(is->video_stream >= 0){
2477                 av_init_packet(pkt);
2478                 pkt->data=NULL;
2479                 pkt->size=0;
2480                 pkt->stream_index= is->video_stream;
2481                 packet_queue_put(&is->videoq, pkt);
2482             }
2483             SDL_Delay(10);
2484             if(autoexit && is->audioq.size + is->videoq.size + is->subtitleq.size ==0){
2485                 ret=AVERROR_EOF;
2486                 goto fail;
2487             }
2488             continue;
2489         }
2490         ret = av_read_frame(ic, pkt);
2491         if (ret < 0) {
2492             if (ret == AVERROR_EOF)
2493                 eof=1;
2494             if (url_ferror(ic->pb))
2495                 break;
2496             SDL_Delay(100); /* wait for user event */
2497             continue;
2498         }
2499         if (pkt->stream_index == is->audio_stream) {
2500             packet_queue_put(&is->audioq, pkt);
2501         } else if (pkt->stream_index == is->video_stream) {
2502             packet_queue_put(&is->videoq, pkt);
2503         } else if (pkt->stream_index == is->subtitle_stream) {
2504             packet_queue_put(&is->subtitleq, pkt);
2505         } else {
2506             av_free_packet(pkt);
2507         }
2508     }
2509     /* wait until the end */
2510     while (!is->abort_request) {
2511         SDL_Delay(100);
2512     }
2513
2514     ret = 0;
2515  fail:
2516     /* disable the interrupt callback */
2517     global_video_state = NULL;
2518
2519     /* close each stream */
2520     if (is->audio_stream >= 0)
2521         stream_component_close(is, is->audio_stream);
2522     if (is->video_stream >= 0)
2523         stream_component_close(is, is->video_stream);
2524     if (is->subtitle_stream >= 0)
2525         stream_component_close(is, is->subtitle_stream);
2526     if (is->ic) {
2527         av_close_input_file(is->ic);
2528         is->ic = NULL; /* safety */
2529     }
2530     url_set_interrupt_cb(NULL);
2531
2532     if (ret != 0) {
2533         SDL_Event event;
2534
2535         event.type = FF_QUIT_EVENT;
2536         event.user.data1 = is;
2537         SDL_PushEvent(&event);
2538     }
2539     return 0;
2540 }
2541
2542 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2543 {
2544     VideoState *is;
2545
2546     is = av_mallocz(sizeof(VideoState));
2547     if (!is)
2548         return NULL;
2549     av_strlcpy(is->filename, filename, sizeof(is->filename));
2550     is->iformat = iformat;
2551     is->ytop = 0;
2552     is->xleft = 0;
2553
2554     /* start video display */
2555     is->pictq_mutex = SDL_CreateMutex();
2556     is->pictq_cond = SDL_CreateCond();
2557
2558     is->subpq_mutex = SDL_CreateMutex();
2559     is->subpq_cond = SDL_CreateCond();
2560
2561     is->av_sync_type = av_sync_type;
2562     is->parse_tid = SDL_CreateThread(decode_thread, is);
2563     if (!is->parse_tid) {
2564         av_free(is);
2565         return NULL;
2566     }
2567     return is;
2568 }
2569
2570 static void stream_close(VideoState *is)
2571 {
2572     VideoPicture *vp;
2573     int i;
2574     /* XXX: use a special url_shutdown call to abort parse cleanly */
2575     is->abort_request = 1;
2576     SDL_WaitThread(is->parse_tid, NULL);
2577     SDL_WaitThread(is->refresh_tid, NULL);
2578
2579     /* free all pictures */
2580     for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2581         vp = &is->pictq[i];
2582 #if CONFIG_AVFILTER
2583         if (vp->picref) {
2584             avfilter_unref_pic(vp->picref);
2585             vp->picref = NULL;
2586         }
2587 #endif
2588         if (vp->bmp) {
2589             SDL_FreeYUVOverlay(vp->bmp);
2590             vp->bmp = NULL;
2591         }
2592     }
2593     SDL_DestroyMutex(is->pictq_mutex);
2594     SDL_DestroyCond(is->pictq_cond);
2595     SDL_DestroyMutex(is->subpq_mutex);
2596     SDL_DestroyCond(is->subpq_cond);
2597 #if !CONFIG_AVFILTER
2598     if (is->img_convert_ctx)
2599         sws_freeContext(is->img_convert_ctx);
2600 #endif
2601     av_free(is);
2602 }
2603
2604 static void stream_cycle_channel(VideoState *is, int codec_type)
2605 {
2606     AVFormatContext *ic = is->ic;
2607     int start_index, stream_index;
2608     AVStream *st;
2609
2610     if (codec_type == CODEC_TYPE_VIDEO)
2611         start_index = is->video_stream;
2612     else if (codec_type == CODEC_TYPE_AUDIO)
2613         start_index = is->audio_stream;
2614     else
2615         start_index = is->subtitle_stream;
2616     if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2617         return;
2618     stream_index = start_index;
2619     for(;;) {
2620         if (++stream_index >= is->ic->nb_streams)
2621         {
2622             if (codec_type == CODEC_TYPE_SUBTITLE)
2623             {
2624                 stream_index = -1;
2625                 goto the_end;
2626             } else
2627                 stream_index = 0;
2628         }
2629         if (stream_index == start_index)
2630             return;
2631         st = ic->streams[stream_index];
2632         if (st->codec->codec_type == codec_type) {
2633             /* check that parameters are OK */
2634             switch(codec_type) {
2635             case CODEC_TYPE_AUDIO:
2636                 if (st->codec->sample_rate != 0 &&
2637                     st->codec->channels != 0)
2638                     goto the_end;
2639                 break;
2640             case CODEC_TYPE_VIDEO:
2641             case CODEC_TYPE_SUBTITLE:
2642                 goto the_end;
2643             default:
2644                 break;
2645             }
2646         }
2647     }
2648  the_end:
2649     stream_component_close(is, start_index);
2650     stream_component_open(is, stream_index);
2651 }
2652
2653
2654 static void toggle_full_screen(void)
2655 {
2656     is_full_screen = !is_full_screen;
2657     if (!fs_screen_width) {
2658         /* use default SDL method */
2659 //        SDL_WM_ToggleFullScreen(screen);
2660     }
2661     video_open(cur_stream);
2662 }
2663
2664 static void toggle_pause(void)
2665 {
2666     if (cur_stream)
2667         stream_pause(cur_stream);
2668     step = 0;
2669 }
2670
2671 static void step_to_next_frame(void)
2672 {
2673     if (cur_stream) {
2674         /* if the stream is paused, unpause it, then step */
2675         if (cur_stream->paused)
2676             stream_pause(cur_stream);
2677     }
2678     step = 1;
2679 }
2680
2681 static void do_exit(void)
2682 {
2683     int i;
2684     if (cur_stream) {
2685         stream_close(cur_stream);
2686         cur_stream = NULL;
2687     }
2688     for (i = 0; i < CODEC_TYPE_NB; i++)
2689         av_free(avcodec_opts[i]);
2690     av_free(avformat_opts);
2691     av_free(sws_opts);
2692 #if CONFIG_AVFILTER
2693     avfilter_uninit();
2694 #endif
2695     if (show_status)
2696         printf("\n");
2697     SDL_Quit();
2698     exit(0);
2699 }
2700
2701 static void toggle_audio_display(void)
2702 {
2703     if (cur_stream) {
2704         int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
2705         cur_stream->show_audio = (cur_stream->show_audio + 1) % 3;
2706         fill_rectangle(screen,
2707                     cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height,
2708                     bgcolor);
2709         SDL_UpdateRect(screen, cur_stream->xleft, cur_stream->ytop, cur_stream->width, cur_stream->height);
2710     }
2711 }
2712
2713 /* handle an event sent by the GUI */
2714 static void event_loop(void)
2715 {
2716     SDL_Event event;
2717     double incr, pos, frac;
2718
2719     for(;;) {
2720         double x;
2721         SDL_WaitEvent(&event);
2722         switch(event.type) {
2723         case SDL_KEYDOWN:
2724             switch(event.key.keysym.sym) {
2725             case SDLK_ESCAPE:
2726             case SDLK_q:
2727                 do_exit();
2728                 break;
2729             case SDLK_f:
2730                 toggle_full_screen();
2731                 break;
2732             case SDLK_p:
2733             case SDLK_SPACE:
2734                 toggle_pause();
2735                 break;
2736             case SDLK_s: //S: Step to next frame
2737                 step_to_next_frame();
2738                 break;
2739             case SDLK_a:
2740                 if (cur_stream)
2741                     stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2742                 break;
2743             case SDLK_v:
2744                 if (cur_stream)
2745                     stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2746                 break;
2747             case SDLK_t:
2748                 if (cur_stream)
2749                     stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2750                 break;
2751             case SDLK_w:
2752                 toggle_audio_display();
2753                 break;
2754             case SDLK_LEFT:
2755                 incr = -10.0;
2756                 goto do_seek;
2757             case SDLK_RIGHT:
2758                 incr = 10.0;
2759                 goto do_seek;
2760             case SDLK_UP:
2761                 incr = 60.0;
2762                 goto do_seek;
2763             case SDLK_DOWN:
2764                 incr = -60.0;
2765             do_seek:
2766                 if (cur_stream) {
2767                     if (seek_by_bytes) {
2768                         if (cur_stream->video_stream >= 0 && cur_stream->video_current_pos>=0){
2769                             pos= cur_stream->video_current_pos;
2770                         }else if(cur_stream->audio_stream >= 0 && cur_stream->audio_pkt.pos>=0){
2771                             pos= cur_stream->audio_pkt.pos;
2772                         }else
2773                             pos = url_ftell(cur_stream->ic->pb);
2774                         if (cur_stream->ic->bit_rate)
2775                             incr *= cur_stream->ic->bit_rate / 8.0;
2776                         else
2777                             incr *= 180000.0;
2778                         pos += incr;
2779                         stream_seek(cur_stream, pos, incr, 1);
2780                     } else {
2781                         pos = get_master_clock(cur_stream);
2782                         pos += incr;
2783                         stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
2784                     }
2785                 }
2786                 break;
2787             default:
2788                 break;
2789             }
2790             break;
2791         case SDL_MOUSEBUTTONDOWN:
2792         case SDL_MOUSEMOTION:
2793             if(event.type ==SDL_MOUSEBUTTONDOWN){
2794                 x= event.button.x;
2795             }else{
2796                 if(event.motion.state != SDL_PRESSED)
2797                     break;
2798                 x= event.motion.x;
2799             }
2800             if (cur_stream) {
2801                 if(seek_by_bytes || cur_stream->ic->duration<=0){
2802                     uint64_t size=  url_fsize(cur_stream->ic->pb);
2803                     stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
2804                 }else{
2805                     int64_t ts;
2806                     int ns, hh, mm, ss;
2807                     int tns, thh, tmm, tss;
2808                     tns = cur_stream->ic->duration/1000000LL;
2809                     thh = tns/3600;
2810                     tmm = (tns%3600)/60;
2811                     tss = (tns%60);
2812                     frac = x/cur_stream->width;
2813                     ns = frac*tns;
2814                     hh = ns/3600;
2815                     mm = (ns%3600)/60;
2816                     ss = (ns%60);
2817                     fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d)       \n", frac*100,
2818                             hh, mm, ss, thh, tmm, tss);
2819                     ts = frac*cur_stream->ic->duration;
2820                     if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
2821                         ts += cur_stream->ic->start_time;
2822                     stream_seek(cur_stream, ts, 0, 0);
2823                 }
2824             }
2825             break;
2826         case SDL_VIDEORESIZE:
2827             if (cur_stream) {
2828                 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2829                                           SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2830                 screen_width = cur_stream->width = event.resize.w;
2831                 screen_height= cur_stream->height= event.resize.h;
2832             }
2833             break;
2834         case SDL_QUIT:
2835         case FF_QUIT_EVENT:
2836             do_exit();
2837             break;
2838         case FF_ALLOC_EVENT:
2839             video_open(event.user.data1);
2840             alloc_picture(event.user.data1);
2841             break;
2842         case FF_REFRESH_EVENT:
2843             video_refresh_timer(event.user.data1);
2844             cur_stream->refresh=0;
2845             break;
2846         default:
2847             break;
2848         }
2849     }
2850 }
2851
2852 static void opt_frame_size(const char *arg)
2853 {
2854     if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2855         fprintf(stderr, "Incorrect frame size\n");
2856         exit(1);
2857     }
2858     if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2859         fprintf(stderr, "Frame size must be a multiple of 2\n");
2860         exit(1);
2861     }
2862 }
2863
2864 static int opt_width(const char *opt, const char *arg)
2865 {
2866     screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2867     return 0;
2868 }
2869
2870 static int opt_height(const char *opt, const char *arg)
2871 {
2872     screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2873     return 0;
2874 }
2875
2876 static void opt_format(const char *arg)
2877 {
2878     file_iformat = av_find_input_format(arg);
2879     if (!file_iformat) {
2880         fprintf(stderr, "Unknown input format: %s\n", arg);
2881         exit(1);
2882     }
2883 }
2884
2885 static void opt_frame_pix_fmt(const char *arg)
2886 {
2887     frame_pix_fmt = av_get_pix_fmt(arg);
2888 }
2889
2890 static int opt_sync(const char *opt, const char *arg)
2891 {
2892     if (!strcmp(arg, "audio"))
2893         av_sync_type = AV_SYNC_AUDIO_MASTER;
2894     else if (!strcmp(arg, "video"))
2895         av_sync_type = AV_SYNC_VIDEO_MASTER;
2896     else if (!strcmp(arg, "ext"))
2897         av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2898     else {
2899         fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2900         exit(1);
2901     }
2902     return 0;
2903 }
2904
2905 static int opt_seek(const char *opt, const char *arg)
2906 {
2907     start_time = parse_time_or_die(opt, arg, 1);
2908     return 0;
2909 }
2910
2911 static int opt_debug(const char *opt, const char *arg)
2912 {
2913     av_log_set_level(99);
2914     debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2915     return 0;
2916 }
2917
2918 static int opt_vismv(const char *opt, const char *arg)
2919 {
2920     debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2921     return 0;
2922 }
2923
2924 static int opt_thread_count(const char *opt, const char *arg)
2925 {
2926     thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2927 #if !HAVE_THREADS
2928     fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2929 #endif
2930     return 0;
2931 }
2932
2933 static const OptionDef options[] = {
2934 #include "cmdutils_common_opts.h"
2935     { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2936     { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2937     { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2938     { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2939     { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2940     { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2941     { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_AUDIO]}, "select desired audio stream", "stream_number" },
2942     { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_VIDEO]}, "select desired video stream", "stream_number" },
2943     { "sst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_stream[CODEC_TYPE_SUBTITLE]}, "select desired subtitle stream", "stream_number" },
2944     { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2945     { "bytes", OPT_INT | HAS_ARG, {(void*)&seek_by_bytes}, "seek by bytes 0=off 1=on -1=auto", "val" },
2946     { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2947     { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2948     { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2949     { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2950     { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2951     { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2952     { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2953     { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2954     { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2955     { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts 0=off 1=on -1=auto", ""},
2956     { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2957     { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2958     { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2959     { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2960     { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo",  "algo" },
2961     { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_recognition}, "set error detection threshold (0-4)",  "threshold" },
2962     { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options",  "bit_mask" },
2963     { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2964     { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2965     { "autoexit", OPT_BOOL | OPT_EXPERT, {(void*)&autoexit}, "exit at the end", "" },
2966     { "framedrop", OPT_BOOL | OPT_EXPERT, {(void*)&framedrop}, "drop frames when cpu is too slow", "" },
2967     { "window_title", OPT_STRING | HAS_ARG, {(void*)&window_title}, "set window title", "window title" },
2968 #if CONFIG_AVFILTER
2969     { "vfilters", OPT_STRING | HAS_ARG, {(void*)&vfilters}, "video filters", "filter list" },
2970 #endif
2971     { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, {(void*)&rdftspeed}, "rdft speed", "msecs" },
2972     { "default", OPT_FUNC2 | HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
2973     { NULL, },
2974 };
2975
2976 static void show_usage(void)
2977 {
2978     printf("Simple media player\n");
2979     printf("usage: ffplay [options] input_file\n");
2980     printf("\n");
2981 }
2982
2983 static void show_help(void)
2984 {
2985     show_usage();
2986     show_help_options(options, "Main options:\n",
2987                       OPT_EXPERT, 0);
2988     show_help_options(options, "\nAdvanced options:\n",
2989                       OPT_EXPERT, OPT_EXPERT);
2990     printf("\nWhile playing:\n"
2991            "q, ESC              quit\n"
2992            "f                   toggle full screen\n"
2993            "p, SPC              pause\n"
2994            "a                   cycle audio channel\n"
2995            "v                   cycle video channel\n"
2996            "t                   cycle subtitle channel\n"
2997            "w                   show audio waves\n"
2998            "left/right          seek backward/forward 10 seconds\n"
2999            "down/up             seek backward/forward 1 minute\n"
3000            "mouse click         seek to percentage in file corresponding to fraction of width\n"
3001            );
3002 }
3003
3004 static void opt_input_file(const char *filename)
3005 {
3006     if (input_filename) {
3007         fprintf(stderr, "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3008                 filename, input_filename);
3009         exit(1);
3010     }
3011     if (!strcmp(filename, "-"))
3012         filename = "pipe:";
3013     input_filename = filename;
3014 }
3015
3016 /* program entry point */
3017 int main(int argc, char **argv)
3018 {
3019     int flags, i;
3020
3021     /* register all codecs, demuxers and protocols */
3022     avcodec_register_all();
3023     avdevice_register_all();
3024 #if CONFIG_AVFILTER
3025     avfilter_register_all();
3026 #endif
3027     av_register_all();
3028
3029     for(i=0; i<CODEC_TYPE_NB; i++){
3030         avcodec_opts[i]= avcodec_alloc_context2(i);
3031     }
3032     avformat_opts = avformat_alloc_context();
3033 #if !CONFIG_AVFILTER
3034     sws_opts = sws_getContext(16,16,0, 16,16,0, sws_flags, NULL,NULL,NULL);
3035 #endif
3036
3037     show_banner();
3038
3039     parse_options(argc, argv, options, opt_input_file);
3040
3041     if (!input_filename) {
3042         show_usage();
3043         fprintf(stderr, "An input file must be specified\n");
3044         fprintf(stderr, "Use -h to get full help or, even better, run 'man ffplay'\n");
3045         exit(1);
3046     }
3047
3048     if (display_disable) {
3049         video_disable = 1;
3050     }
3051     flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3052 #if !defined(__MINGW32__) && !defined(__APPLE__)
3053     flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
3054 #endif
3055     if (SDL_Init (flags)) {
3056         fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
3057         exit(1);
3058     }
3059
3060     if (!display_disable) {
3061 #if HAVE_SDL_VIDEO_SIZE
3062         const SDL_VideoInfo *vi = SDL_GetVideoInfo();
3063         fs_screen_width = vi->current_w;
3064         fs_screen_height = vi->current_h;
3065 #endif
3066     }
3067
3068     SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
3069     SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3070     SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3071
3072     av_init_packet(&flush_pkt);
3073     flush_pkt.data= "FLUSH";
3074
3075     cur_stream = stream_open(input_filename, file_iformat);
3076
3077     event_loop();
3078
3079     /* never returns */
3080
3081     return 0;
3082 }