Merge remote-tracking branch 'qatar/master'
author Michael Niedermayer <michaelni@gmx.at>
Tue, 6 Mar 2012 02:56:25 +0000 (03:56 +0100)
committer Michael Niedermayer <michaelni@gmx.at>
Tue, 6 Mar 2012 05:03:32 +0000 (06:03 +0100)
* qatar/master: (31 commits)
  cdxl demux: do not create packets with uninitialized data at EOF.
  Replace computations of remaining bits with calls to get_bits_left().
  amrnb/amrwb: Remove get_bits usage.
  cosmetics: reindent
  avformat: do not require a pixel/sample format if there is no decoder
  avformat: do not fill-in audio packet duration in compute_pkt_fields()
  lavf: Use av_get_audio_frame_duration() in get_audio_frame_size()
  dca_parser: parse the sample rate and frame durations
  libspeexdec: do not set AVCodecContext.frame_size
  libopencore-amr: do not set AVCodecContext.frame_size
  alsdec: do not set AVCodecContext.frame_size
  siff: do not set AVCodecContext.frame_size
  amr demuxer: do not set AVCodecContext.frame_size.
  aiffdec: do not set AVCodecContext.frame_size
  mov: do not set AVCodecContext.frame_size
  ape: do not set AVCodecContext.frame_size.
  rdt: remove workaround for infinite loop with aac
  avformat: do not require frame_size in avformat_find_stream_info() for CELT
  avformat: do not require frame_size in avformat_find_stream_info() for MP1/2/3
  avformat: do not require frame_size in avformat_find_stream_info() for AAC
  ...

Conflicts:
doc/APIchanges
libavcodec/Makefile
libavcodec/avcodec.h
libavcodec/h264.c
libavcodec/h264_ps.c
libavcodec/utils.c
libavcodec/version.h
libavcodec/x86/dsputil_mmx.c
libavformat/utils.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
37 files changed:
doc/APIchanges
libavcodec/Makefile
libavcodec/alsdec.c
libavcodec/amrnbdec.c
libavcodec/amrwbdec.c
libavcodec/avcodec.h
libavcodec/dca.c
libavcodec/dca_parser.c
libavcodec/escape124.c
libavcodec/h261dec.c
libavcodec/h263dec.c
libavcodec/h264.c
libavcodec/h264_ps.c
libavcodec/h264_sei.c
libavcodec/huffyuv.c
libavcodec/ituh263dec.c
libavcodec/libopencore-amr.c
libavcodec/libspeexdec.c
libavcodec/mjpegdec.c
libavcodec/utils.c
libavcodec/version.h
libavcodec/vp6.c
libavcodec/x86/dsputil_mmx.c
libavformat/aiffdec.c
libavformat/amr.c
libavformat/ape.c
libavformat/avformat.h
libavformat/dv.c
libavformat/mov.c
libavformat/rdt.c
libavformat/riff.c
libavformat/rtpenc.c
libavformat/seek.c
libavformat/seek.h
libavformat/siff.c
libavformat/swfenc.c
libavformat/utils.c

diff --cc doc/APIchanges
@@@ -13,28 -12,10 +13,32 @@@ libavutil:   2011-04-1
  
  API changes, most recent first:
  
 +2012-02-21 - xxxxxxx - lavc 54.4.100
 +  Add av_get_pcm_codec() function.
 +
 +2012-02-16 - xxxxxxx - libswr 0.7.100
 +  Add swr_set_matrix() function.
 +
 +2012-02-09 - xxxxxxx - lavu 51.39.100
 +  Add a new installed header libavutil/timestamp.h with timestamp
 +  utilities.
 +
 +2012-02-06 - xxxxxxx - lavu 51.38.100
 +  Add av_parse_ratio() function to parseutils.h.
 +
 +2012-02-06 - xxxxxxx - lavu 51.38.100
 +  Add AV_LOG_MAX_OFFSET macro to log.h.
 +
 +2012-02-02 - xxxxxxx - lavu 51.37.100
 +  Add public timecode helpers.
 +
 +2012-01-24 - xxxxxxx - lavfi 2.60.100
 +  Add avfilter_graph_dump.
 +
+ 2012-xx-xx - lavc 54.8.0
+   xxxxxxx Add av_get_exact_bits_per_sample()
+   xxxxxxx Add av_get_audio_frame_duration()
  2012-03-xx - xxxxxxx - lavc 54.7.0 - avcodec.h
    Add av_codec_is_encoder/decoder().
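
  [Editor's sketch, not part of the patch: a minimal use of the av_codec_is_encoder()/av_codec_is_decoder() helpers from the lavc 54.7.0 entry above, assuming only standard lavc calls that exist in this era.]

      #include <stdio.h>
      #include <libavcodec/avcodec.h>

      int main(void)
      {
          AVCodec *c = NULL;
          int enc = 0, dec = 0;
          avcodec_register_all();
          /* the new helpers replace direct checks of AVCodec function pointers */
          while ((c = av_codec_next(c))) {
              enc += !!av_codec_is_encoder(c);
              dec += !!av_codec_is_decoder(c);
          }
          printf("%d encoders, %d decoders\n", enc, dec);
          return 0;
      }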
  
@@@ -118,10 -109,8 +118,11 @@@ OBJS-$(CONFIG_CLJR_ENCODER)            
  OBJS-$(CONFIG_COOK_DECODER)            += cook.o
  OBJS-$(CONFIG_CSCD_DECODER)            += cscd.o
  OBJS-$(CONFIG_CYUV_DECODER)            += cyuv.o
- OBJS-$(CONFIG_DCA_DECODER)             += dca.o synth_filter.o dcadsp.o
+ OBJS-$(CONFIG_DCA_DECODER)             += dca.o synth_filter.o dcadsp.o \
+                                           dca_parser.o
 +OBJS-$(CONFIG_DCA_ENCODER)             += dcaenc.o
 +OBJS-$(CONFIG_DIRAC_DECODER)           += diracdec.o dirac.o diracdsp.o \
 +                                          dirac_arith.o mpeg12data.o dwt.o
  OBJS-$(CONFIG_DFA_DECODER)             += dfa.o
  OBJS-$(CONFIG_DNXHD_DECODER)           += dnxhddec.o dnxhddata.o
  OBJS-$(CONFIG_DNXHD_ENCODER)           += dnxhdenc.o dnxhddata.o       \
Simple merge
Simple merge
Simple merge
@@@ -4121,13 -3933,25 +4121,33 @@@ void avcodec_default_free_buffers(AVCod
  int av_get_bits_per_sample(enum CodecID codec_id);
  
  /**
 + * Return the PCM codec associated with a sample format.
 + * @param be  endianness, 0 for little, 1 for big,
 + *            -1 (or anything else) for native
 + * @return  CODEC_ID_PCM_* or CODEC_ID_NONE
 + */
 +enum CodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
 +
++/**
+  * Return codec bits per sample.
+  * Only return non-zero if the bits per sample is exactly correct, not an
+  * approximation.
+  *
+  * @param[in] codec_id the codec
+  * @return Number of bits per sample or zero if unknown for the given codec.
+  */
+ int av_get_exact_bits_per_sample(enum CodecID codec_id);
+ /**
+  * Return audio frame duration.
+  *
+  * @param avctx        codec context
+  * @param frame_bytes  size of the frame, or 0 if unknown
+  * @return             frame duration, in samples, if known. 0 if not able to
+  *                     determine.
+  */
+ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
  /* frame parsing */
  typedef struct AVCodecParserContext {
      void *priv_data;
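
  [Editor's sketch, not part of the patch: minimal usage of the two helpers declared above; "avctx" (an opened audio decoder context) and "pkt" (a demuxed packet) are hypothetical caller-side names.]

      #include <libavcodec/avcodec.h>

      static void log_audio_packet(AVCodecContext *avctx, const AVPacket *pkt)
      {
          int bps = av_get_exact_bits_per_sample(avctx->codec_id); /* 0 unless exact */
          int dur = av_get_audio_frame_duration(avctx, pkt->size); /* samples, 0 if unknown */
          if (dur > 0 && avctx->sample_rate > 0)
              av_log(avctx, AV_LOG_DEBUG, "%d samples (%.1f ms), %d bits/sample\n",
                     dur, 1000.0 * dur / avctx->sample_rate, bps);
      }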
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -3687,8 -3667,7 +3687,8 @@@ static int decode_slice(struct AVCodecC
                  if(s->mb_y >= s->mb_height){
                      tprintf(s->avctx, "slice end %d %d\n", get_bits_count(&s->gb), s->gb.size_in_bits);
  
-                     if(   get_bits_count(&s->gb) == s->gb.size_in_bits
-                        || get_bits_count(&s->gb) <  s->gb.size_in_bits && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
 -                    if (get_bits_left(&s->gb) == 0) {
++                    if (   get_bits_left(&s->gb) == 0
++                        || get_bits_left(&s->gb) > 0 && !(s->avctx->err_recognition & AV_EF_AGGRESSIVE)) {
                          ff_er_add_slice(s, s->resync_mb_x, s->resync_mb_y, s->mb_x-1, s->mb_y, ER_MB_END&part_mask);
  
                          return 0;
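
  [Editor's note: for context, the get_bits_left() helper that replaces the hand-written comparisons above is essentially the following (see libavcodec/get_bits.h):]

      static inline int get_bits_left(GetBitContext *gb)
      {
          return gb->size_in_bits - get_bits_count(gb);
      }

  so "get_bits_count(&s->gb) == s->gb.size_in_bits" becomes "get_bits_left(&s->gb) == 0", and the "<" comparison becomes "> 0".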
@@@ -241,7 -227,8 +241,7 @@@ static inline int decode_vui_parameters
          sps->num_reorder_frames= get_ue_golomb(&s->gb);
          get_ue_golomb(&s->gb); /*max_dec_frame_buffering*/
  
-         if(get_bits_left(&s->gb) < 0){
+         if (get_bits_left(&s->gb) < 0) {
 -            av_log(h->s.avctx, AV_LOG_ERROR, "Overread VUI by %d bits\n", -get_bits_left(&s->gb));
              sps->num_reorder_frames=0;
              sps->bitstream_restriction_flag= 0;
          }
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -1887,25 -1724,164 +1881,185 @@@ int av_get_exact_bits_per_sample(enum C
      }
  }
  
 +enum CodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be)
 +{
 +    static const enum CodecID map[AV_SAMPLE_FMT_NB][2] = {
 +        [AV_SAMPLE_FMT_U8  ] = { CODEC_ID_PCM_U8,    CODEC_ID_PCM_U8    },
 +        [AV_SAMPLE_FMT_S16 ] = { CODEC_ID_PCM_S16LE, CODEC_ID_PCM_S16BE },
 +        [AV_SAMPLE_FMT_S32 ] = { CODEC_ID_PCM_S32LE, CODEC_ID_PCM_S32BE },
 +        [AV_SAMPLE_FMT_FLT ] = { CODEC_ID_PCM_F32LE, CODEC_ID_PCM_F32BE },
 +        [AV_SAMPLE_FMT_DBL ] = { CODEC_ID_PCM_F64LE, CODEC_ID_PCM_F64BE },
 +        [AV_SAMPLE_FMT_U8P ] = { CODEC_ID_PCM_U8,    CODEC_ID_PCM_U8    },
 +        [AV_SAMPLE_FMT_S16P] = { CODEC_ID_PCM_S16LE, CODEC_ID_PCM_S16BE },
 +        [AV_SAMPLE_FMT_S32P] = { CODEC_ID_PCM_S32LE, CODEC_ID_PCM_S32BE },
 +        [AV_SAMPLE_FMT_FLTP] = { CODEC_ID_PCM_F32LE, CODEC_ID_PCM_F32BE },
 +        [AV_SAMPLE_FMT_DBLP] = { CODEC_ID_PCM_F64LE, CODEC_ID_PCM_F64BE },
 +    };
 +    if (fmt < 0 || fmt >= AV_SAMPLE_FMT_NB)
 +        return CODEC_ID_NONE;
 +    if (be < 0 || be > 1)
 +        be = AV_NE(1, 0);
 +    return map[fmt][be];
++}
++
+ int av_get_bits_per_sample(enum CodecID codec_id)
+ {
+     switch (codec_id) {
+     case CODEC_ID_ADPCM_SBPRO_2:
+         return 2;
+     case CODEC_ID_ADPCM_SBPRO_3:
+         return 3;
+     case CODEC_ID_ADPCM_SBPRO_4:
+     case CODEC_ID_ADPCM_IMA_WAV:
+     case CODEC_ID_ADPCM_IMA_QT:
+     case CODEC_ID_ADPCM_SWF:
+     case CODEC_ID_ADPCM_MS:
+         return 4;
+     default:
+         return av_get_exact_bits_per_sample(codec_id);
+     }
+ }
+ int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
+ {
+     int id, sr, ch, ba, tag, bps;
+     id  = avctx->codec_id;
+     sr  = avctx->sample_rate;
+     ch  = avctx->channels;
+     ba  = avctx->block_align;
+     tag = avctx->codec_tag;
+     bps = av_get_exact_bits_per_sample(avctx->codec_id);
+     /* codecs with an exact constant bits per sample */
+     if (bps > 0 && ch > 0 && frame_bytes > 0)
+         return (frame_bytes * 8) / (bps * ch);
+     bps = avctx->bits_per_coded_sample;
+     /* codecs with a fixed packet duration */
+     switch (id) {
+     case CODEC_ID_ADPCM_ADX:    return   32;
+     case CODEC_ID_ADPCM_IMA_QT: return   64;
+     case CODEC_ID_ADPCM_EA_XAS: return  128;
+     case CODEC_ID_AMR_NB:
+     case CODEC_ID_GSM:
+     case CODEC_ID_QCELP:
+     case CODEC_ID_RA_144:
+     case CODEC_ID_RA_288:       return  160;
+     case CODEC_ID_IMC:          return  256;
+     case CODEC_ID_AMR_WB:
+     case CODEC_ID_GSM_MS:       return  320;
+     case CODEC_ID_MP1:          return  384;
+     case CODEC_ID_ATRAC1:       return  512;
+     case CODEC_ID_ATRAC3:       return 1024;
+     case CODEC_ID_MP2:
+     case CODEC_ID_MUSEPACK7:    return 1152;
+     case CODEC_ID_AC3:          return 1536;
+     }
+     if (sr > 0) {
+         /* calc from sample rate */
+         if (id == CODEC_ID_TTA)
+             return 256 * sr / 245;
+         if (ch > 0) {
+             /* calc from sample rate and channels */
+             if (id == CODEC_ID_BINKAUDIO_DCT)
+                 return (480 << (sr / 22050)) / ch;
+         }
+     }
+     if (ba > 0) {
+         /* calc from block_align */
+         if (id == CODEC_ID_SIPR) {
+             switch (ba) {
+             case 20: return 160;
+             case 19: return 144;
+             case 29: return 288;
+             case 37: return 480;
+             }
+         }
+     }
+     if (frame_bytes > 0) {
+         /* calc from frame_bytes only */
+         if (id == CODEC_ID_TRUESPEECH)
+             return 240 * (frame_bytes / 32);
+         if (id == CODEC_ID_NELLYMOSER)
+             return 256 * (frame_bytes / 64);
+         if (bps > 0) {
+             /* calc from frame_bytes and bits_per_coded_sample */
+             if (id == CODEC_ID_ADPCM_G726)
+                 return frame_bytes * 8 / bps;
+         }
+         if (ch > 0) {
+             /* calc from frame_bytes and channels */
+             switch (id) {
+             case CODEC_ID_ADPCM_4XM:
+             case CODEC_ID_ADPCM_IMA_ISS:
+                 return (frame_bytes - 4 * ch) * 2 / ch;
+             case CODEC_ID_ADPCM_IMA_SMJPEG:
+                 return (frame_bytes - 4) * 2 / ch;
+             case CODEC_ID_ADPCM_IMA_AMV:
+                 return (frame_bytes - 8) * 2 / ch;
+             case CODEC_ID_ADPCM_XA:
+                 return (frame_bytes / 128) * 224 / ch;
+             case CODEC_ID_INTERPLAY_DPCM:
+                 return (frame_bytes - 6 - ch) / ch;
+             case CODEC_ID_ROQ_DPCM:
+                 return (frame_bytes - 8) / ch;
+             case CODEC_ID_XAN_DPCM:
+                 return (frame_bytes - 2 * ch) / ch;
+             case CODEC_ID_MACE3:
+                 return 3 * frame_bytes / ch;
+             case CODEC_ID_MACE6:
+                 return 6 * frame_bytes / ch;
+             case CODEC_ID_PCM_LXF:
+                 return 2 * (frame_bytes / (5 * ch));
+             }
+             if (tag) {
+                 /* calc from frame_bytes, channels, and codec_tag */
+                 if (id == CODEC_ID_SOL_DPCM) {
+                     if (tag == 3)
+                         return frame_bytes / ch;
+                     else
+                         return frame_bytes * 2 / ch;
+                 }
+             }
+             if (ba > 0) {
+                 /* calc from frame_bytes, channels, and block_align */
+                 int blocks = frame_bytes / ba;
+                 switch (avctx->codec_id) {
+                 case CODEC_ID_ADPCM_IMA_WAV:
+                     return blocks * (1 + (ba - 4 * ch) / (4 * ch) * 8);
+                 case CODEC_ID_ADPCM_IMA_DK3:
+                     return blocks * (((ba - 16) * 8 / 3) / ch);
+                 case CODEC_ID_ADPCM_IMA_DK4:
+                     return blocks * (1 + (ba - 4 * ch) * 2 / ch);
+                 case CODEC_ID_ADPCM_MS:
+                     return blocks * (2 + (ba - 7 * ch) * 2 / ch);
+                 }
+             }
+             if (bps > 0) {
+                 /* calc from frame_bytes, channels, and bits_per_coded_sample */
+                 switch (avctx->codec_id) {
+                 case CODEC_ID_PCM_DVD:
+                     return 2 * (frame_bytes / ((bps * 2 / 8) * ch));
+                 case CODEC_ID_PCM_BLURAY:
+                     return frame_bytes / ((FFALIGN(ch, 2) * bps) / 8);
+                 case CODEC_ID_S302M:
+                     return 2 * (frame_bytes / ((bps + 4) / 4)) / ch;
+                 }
+             }
+         }
+     }
+     return 0;
  }
  
  #if !HAVE_THREADS
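
  [Editor's sketch, not part of the patch: the av_get_pcm_codec() mapping implemented above, where the -1 endianness argument requests the native-endian variant; avcodec_get_name() is the existing lavc helper.]

      #include <stdio.h>
      #include <libavcodec/avcodec.h>

      int main(void)
      {
          /* map a sample format to its native-endian PCM CodecID */
          enum CodecID id = av_get_pcm_codec(AV_SAMPLE_FMT_S16, -1);
          printf("PCM codec for s16/native: %s\n",
                 id != CODEC_ID_NONE ? avcodec_get_name(id) : "none");
          return 0;
      }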
@@@ -21,8 -21,8 +21,8 @@@
  #define AVCODEC_VERSION_H
  
  #define LIBAVCODEC_VERSION_MAJOR 54
- #define LIBAVCODEC_VERSION_MINOR  9
 -#define LIBAVCODEC_VERSION_MINOR  8
 -#define LIBAVCODEC_VERSION_MICRO  0
++#define LIBAVCODEC_VERSION_MINOR  10
 +#define LIBAVCODEC_VERSION_MICRO 100
  
  #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
                                                 LIBAVCODEC_VERSION_MINOR, \
Simple merge
@@@ -2415,12 -2336,467 +2415,470 @@@ extern void ff_butterflies_float_interl
  extern void ff_butterflies_float_interleave_avx(float *dst, const float *src0,
                                                  const float *src1, int len);
  
- void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
+ #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU, PREFIX) \
+     c->PFX ## _pixels_tab[IDX][ 0] = PREFIX ## PFX ## SIZE ## _mc00_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 1] = PREFIX ## PFX ## SIZE ## _mc10_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 2] = PREFIX ## PFX ## SIZE ## _mc20_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 3] = PREFIX ## PFX ## SIZE ## _mc30_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 4] = PREFIX ## PFX ## SIZE ## _mc01_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 5] = PREFIX ## PFX ## SIZE ## _mc11_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 6] = PREFIX ## PFX ## SIZE ## _mc21_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 7] = PREFIX ## PFX ## SIZE ## _mc31_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 8] = PREFIX ## PFX ## SIZE ## _mc02_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][ 9] = PREFIX ## PFX ## SIZE ## _mc12_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][10] = PREFIX ## PFX ## SIZE ## _mc22_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][11] = PREFIX ## PFX ## SIZE ## _mc32_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][12] = PREFIX ## PFX ## SIZE ## _mc03_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][13] = PREFIX ## PFX ## SIZE ## _mc13_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][14] = PREFIX ## PFX ## SIZE ## _mc23_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][15] = PREFIX ## PFX ## SIZE ## _mc33_ ## CPU
+ #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
+     c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU;    \
+     c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
+     c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
+ #define H264_QPEL_FUNCS(x, y, CPU) \
+     c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU; \
+     c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;  \
+     c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU; \
+     c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU
+ #define H264_QPEL_FUNCS_10(x, y, CPU) \
+     c->put_h264_qpel_pixels_tab[0][x+y*4] = ff_put_h264_qpel16_mc##x##y##_10_##CPU; \
+     c->put_h264_qpel_pixels_tab[1][x+y*4] = ff_put_h264_qpel8_mc##x##y##_10_##CPU;  \
+     c->avg_h264_qpel_pixels_tab[0][x+y*4] = ff_avg_h264_qpel16_mc##x##y##_10_##CPU; \
+     c->avg_h264_qpel_pixels_tab[1][x+y*4] = ff_avg_h264_qpel8_mc##x##y##_10_##CPU;
+ static void dsputil_init_mmx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
+ {
+     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
+     c->put_pixels_clamped        = ff_put_pixels_clamped_mmx;
+     c->put_signed_pixels_clamped = ff_put_signed_pixels_clamped_mmx;
+     c->add_pixels_clamped        = ff_add_pixels_clamped_mmx;
+     if (!high_bit_depth) {
+         c->clear_block  = clear_block_mmx;
+         c->clear_blocks = clear_blocks_mmx;
+         c->draw_edges   = draw_edges_mmx;
+         SET_HPEL_FUNCS(put, 0, 16, mmx);
+         SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
+         SET_HPEL_FUNCS(avg, 0, 16, mmx);
+         SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
+         SET_HPEL_FUNCS(put, 1, 8, mmx);
+         SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
+         SET_HPEL_FUNCS(avg, 1, 8, mmx);
+         SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
+     }
+ #if ARCH_X86_32 || !HAVE_YASM
+     c->gmc= gmc_mmx;
+ #endif
+ #if ARCH_X86_32 && HAVE_YASM
+     if (!high_bit_depth)
+         c->emulated_edge_mc = emulated_edge_mc_mmx;
+ #endif
+     c->add_bytes = add_bytes_mmx;
++    c->put_no_rnd_pixels_l2[0]= put_vp_no_rnd_pixels16_l2_mmx;
++    c->put_no_rnd_pixels_l2[1]= put_vp_no_rnd_pixels8_l2_mmx;
++
+     if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
+         c->h263_v_loop_filter = h263_v_loop_filter_mmx;
+         c->h263_h_loop_filter = h263_h_loop_filter_mmx;
+     }
+ #if HAVE_YASM
+     if (!high_bit_depth && CONFIG_H264CHROMA) {
+         c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_mmx_rnd;
+         c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_mmx;
+     }
+     c->vector_clip_int32 = ff_vector_clip_int32_mmx;
+ #endif
+ }
+ static void dsputil_init_mmx2(DSPContext *c, AVCodecContext *avctx,
+                               int mm_flags)
+ {
+     const int bit_depth      = avctx->bits_per_raw_sample;
+     const int high_bit_depth = bit_depth > 8;
+     c->prefetch = prefetch_mmx2;
+     if (!high_bit_depth) {
+         c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
+         c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
+         c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
+         c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
+         c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
+         c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
+         c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
+         c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
+         c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
+         c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
+     }
+     if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
+         if (!high_bit_depth) {
+             c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
+             c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
+             c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
+             c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
+             c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
+             c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
+         }
+         if (CONFIG_VP3_DECODER && HAVE_YASM) {
+             c->vp3_v_loop_filter = ff_vp3_v_loop_filter_mmx2;
+             c->vp3_h_loop_filter = ff_vp3_h_loop_filter_mmx2;
+         }
+     }
+     if (CONFIG_VP3_DECODER && HAVE_YASM) {
+         c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
+     }
+     if (CONFIG_VP3_DECODER
+         && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
+         c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_mmx2;
+         c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_mmx2;
+     }
+     SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2, );
+     SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2, );
+     SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2, );
+     SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2, );
+     SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2, );
+     SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2, );
+     if (!high_bit_depth) {
+         SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2, );
+         SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2, );
+         SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2, );
+         SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2, );
+         SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2, );
+         SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2, );
+     } else if (bit_depth == 10) {
+ #if HAVE_YASM
+ #if !ARCH_X86_64
+         SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_mmxext, ff_);
+         SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_mmxext, ff_);
+         SET_QPEL_FUNCS(put_h264_qpel, 1, 8,  10_mmxext, ff_);
+         SET_QPEL_FUNCS(avg_h264_qpel, 1, 8,  10_mmxext, ff_);
+ #endif
+         SET_QPEL_FUNCS(put_h264_qpel, 2, 4,  10_mmxext, ff_);
+         SET_QPEL_FUNCS(avg_h264_qpel, 2, 4,  10_mmxext, ff_);
+ #endif
+     }
+     SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2, );
+     SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2, );
+     SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2, );
+     SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2, );
+ #if HAVE_YASM
+     if (!high_bit_depth && CONFIG_H264CHROMA) {
+         c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_mmx2_rnd;
+         c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_mmx2;
+         c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_mmx2;
+         c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_mmx2;
+     }
+     if (bit_depth == 10 && CONFIG_H264CHROMA) {
+         c->put_h264_chroma_pixels_tab[2] = ff_put_h264_chroma_mc2_10_mmxext;
+         c->avg_h264_chroma_pixels_tab[2] = ff_avg_h264_chroma_mc2_10_mmxext;
+         c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_10_mmxext;
+         c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_10_mmxext;
+     }
+     c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
+     c->scalarproduct_int16          = ff_scalarproduct_int16_mmx2;
+     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
+     if (avctx->flags & CODEC_FLAG_BITEXACT) {
+         c->apply_window_int16 = ff_apply_window_int16_mmxext_ba;
+     } else {
+         c->apply_window_int16 = ff_apply_window_int16_mmxext;
+     }
+ #endif
+ }
+ static void dsputil_init_3dnow(DSPContext *c, AVCodecContext *avctx,
+                                int mm_flags)
  {
-     int mm_flags = av_get_cpu_flags();
      const int high_bit_depth = avctx->bits_per_raw_sample > 8;
+     c->prefetch = prefetch_3dnow;
+     if (!high_bit_depth) {
+         c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
+         c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
+         c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
+         c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
+         c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
+         c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
+         c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
+         c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
+         c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
+         c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
+         if (!(avctx->flags & CODEC_FLAG_BITEXACT)){
+             c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
+             c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
+             c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
+             c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
+             c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
+             c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
+         }
+     }
+     if (CONFIG_VP3_DECODER
+         && (avctx->codec_id == CODEC_ID_VP3 || avctx->codec_id == CODEC_ID_THEORA)) {
+         c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_exact_3dnow;
+         c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_exact_3dnow;
+     }
+     SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow, );
+     SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow, );
+     SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow, );
+     SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow, );
+     SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow, );
+     SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow, );
+     if (!high_bit_depth) {
+         SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow, );
+         SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow, );
+         SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow, );
+         SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow, );
+         SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow, );
+         SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow, );
+     }
+     SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow, );
+     SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow, );
+     SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow, );
+     SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow, );
+ #if HAVE_YASM
+     if (!high_bit_depth && CONFIG_H264CHROMA) {
+         c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_3dnow_rnd;
+         c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_3dnow;
+     }
+ #endif
+     c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
+     c->vector_fmul             = vector_fmul_3dnow;
+     c->vector_fmul_add         = vector_fmul_add_3dnow;
+ #if HAVE_7REGS
+     c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
+ #endif
+ }
+ static void dsputil_init_3dnow2(DSPContext *c, AVCodecContext *avctx,
+                                 int mm_flags)
+ {
+     c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
+ #if HAVE_6REGS
+     c->vector_fmul_window  = vector_fmul_window_3dnow2;
+ #endif
+ }
+ static void dsputil_init_sse(DSPContext *c, AVCodecContext *avctx, int mm_flags)
+ {
+     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
+     if (!high_bit_depth) {
+         if (!(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
+             /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
+             c->clear_block  = clear_block_sse;
+             c->clear_blocks = clear_blocks_sse;
+         }
+     }
+     c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
+     c->ac3_downmix             = ac3_downmix_sse;
+     c->vector_fmul             = vector_fmul_sse;
+     c->vector_fmul_reverse     = vector_fmul_reverse_sse;
+     if (!(mm_flags & AV_CPU_FLAG_3DNOW))
+         c->vector_fmul_add = vector_fmul_add_sse;
+ #if HAVE_6REGS
+     c->vector_fmul_window = vector_fmul_window_sse;
+ #endif
+     c->vector_clipf = vector_clipf_sse;
+ #if HAVE_YASM
+     c->scalarproduct_float          = ff_scalarproduct_float_sse;
+     c->butterflies_float_interleave = ff_butterflies_float_interleave_sse;
+     if (!high_bit_depth)
+         c->emulated_edge_mc = emulated_edge_mc_sse;
+     c->gmc = gmc_sse;
+ #endif
+ }
+ static void dsputil_init_sse2(DSPContext *c, AVCodecContext *avctx,
+                               int mm_flags)
+ {
+     const int bit_depth      = avctx->bits_per_raw_sample;
+     const int high_bit_depth = bit_depth > 8;
+     if (mm_flags & AV_CPU_FLAG_3DNOW) {
+         // these functions are slower than mmx on AMD, but faster on Intel
+         if (!high_bit_depth) {
+             c->put_pixels_tab[0][0]        = put_pixels16_sse2;
+             c->put_no_rnd_pixels_tab[0][0] = put_pixels16_sse2;
+             c->avg_pixels_tab[0][0]        = avg_pixels16_sse2;
+             H264_QPEL_FUNCS(0, 0, sse2);
+         }
+     }
+     if (!high_bit_depth) {
+         H264_QPEL_FUNCS(0, 1, sse2);
+         H264_QPEL_FUNCS(0, 2, sse2);
+         H264_QPEL_FUNCS(0, 3, sse2);
+         H264_QPEL_FUNCS(1, 1, sse2);
+         H264_QPEL_FUNCS(1, 2, sse2);
+         H264_QPEL_FUNCS(1, 3, sse2);
+         H264_QPEL_FUNCS(2, 1, sse2);
+         H264_QPEL_FUNCS(2, 2, sse2);
+         H264_QPEL_FUNCS(2, 3, sse2);
+         H264_QPEL_FUNCS(3, 1, sse2);
+         H264_QPEL_FUNCS(3, 2, sse2);
+         H264_QPEL_FUNCS(3, 3, sse2);
+     }
+ #if HAVE_YASM
+     if (bit_depth == 10) {
+         SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 10_sse2, ff_);
+         SET_QPEL_FUNCS(put_h264_qpel, 1, 8,  10_sse2, ff_);
+         SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 10_sse2, ff_);
+         SET_QPEL_FUNCS(avg_h264_qpel, 1, 8,  10_sse2, ff_);
+         H264_QPEL_FUNCS_10(1, 0, sse2_cache64);
+         H264_QPEL_FUNCS_10(2, 0, sse2_cache64);
+         H264_QPEL_FUNCS_10(3, 0, sse2_cache64);
+         if (CONFIG_H264CHROMA) {
+             c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_sse2;
+             c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_sse2;
+         }
+     }
+     c->scalarproduct_int16          = ff_scalarproduct_int16_sse2;
+     c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
+     if (mm_flags & AV_CPU_FLAG_ATOM) {
+         c->vector_clip_int32 = ff_vector_clip_int32_int_sse2;
+     } else {
+         c->vector_clip_int32 = ff_vector_clip_int32_sse2;
+     }
+     if (avctx->flags & CODEC_FLAG_BITEXACT) {
+         c->apply_window_int16 = ff_apply_window_int16_sse2_ba;
+     } else  if (!(mm_flags & AV_CPU_FLAG_SSE2SLOW)) {
+         c->apply_window_int16 = ff_apply_window_int16_sse2;
+     }
+     c->bswap_buf = ff_bswap32_buf_sse2;
+ #endif
+ }
+ static void dsputil_init_ssse3(DSPContext *c, AVCodecContext *avctx,
+                                int mm_flags)
+ {
+ #if HAVE_SSSE3
+     const int high_bit_depth = avctx->bits_per_raw_sample > 8;
+     const int bit_depth      = avctx->bits_per_raw_sample;
+     if (!high_bit_depth) {
+         H264_QPEL_FUNCS(1, 0, ssse3);
+         H264_QPEL_FUNCS(1, 1, ssse3);
+         H264_QPEL_FUNCS(1, 2, ssse3);
+         H264_QPEL_FUNCS(1, 3, ssse3);
+         H264_QPEL_FUNCS(2, 0, ssse3);
+         H264_QPEL_FUNCS(2, 1, ssse3);
+         H264_QPEL_FUNCS(2, 2, ssse3);
+         H264_QPEL_FUNCS(2, 3, ssse3);
+         H264_QPEL_FUNCS(3, 0, ssse3);
+         H264_QPEL_FUNCS(3, 1, ssse3);
+         H264_QPEL_FUNCS(3, 2, ssse3);
+         H264_QPEL_FUNCS(3, 3, ssse3);
+     }
+ #if HAVE_YASM
+     else if (bit_depth == 10) {
+         H264_QPEL_FUNCS_10(1, 0, ssse3_cache64);
+         H264_QPEL_FUNCS_10(2, 0, ssse3_cache64);
+         H264_QPEL_FUNCS_10(3, 0, ssse3_cache64);
+     }
+     if (!high_bit_depth && CONFIG_H264CHROMA) {
+         c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_ssse3_rnd;
+         c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_ssse3_rnd;
+         c->put_h264_chroma_pixels_tab[1] = ff_put_h264_chroma_mc4_ssse3;
+         c->avg_h264_chroma_pixels_tab[1] = ff_avg_h264_chroma_mc4_ssse3;
+     }
+     c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
+     if (mm_flags & AV_CPU_FLAG_SSE4) // not really sse4, just slow on Conroe
+         c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
+     if (mm_flags & AV_CPU_FLAG_ATOM) {
+         c->apply_window_int16 = ff_apply_window_int16_ssse3_atom;
+     } else {
+         c->apply_window_int16 = ff_apply_window_int16_ssse3;
+     }
+     if (!(mm_flags & (AV_CPU_FLAG_SSE42|AV_CPU_FLAG_3DNOW))) { // cachesplit
+         c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
+     }
+     c->bswap_buf = ff_bswap32_buf_ssse3;
+ #endif
+ #endif
+ }
+ static void dsputil_init_sse4(DSPContext *c, AVCodecContext *avctx,
+                               int mm_flags)
+ {
+ #if HAVE_YASM
+     c->vector_clip_int32 = ff_vector_clip_int32_sse4;
+ #endif
+ }
+ static void dsputil_init_avx(DSPContext *c, AVCodecContext *avctx, int mm_flags)
+ {
+ #if HAVE_AVX && HAVE_YASM
      const int bit_depth = avctx->bits_per_raw_sample;
  
+     if (bit_depth == 10) {
+         // AVX implies !cache64.
+         // TODO: Port cache(32|64) detection from x264.
+         H264_QPEL_FUNCS_10(1, 0, sse2);
+         H264_QPEL_FUNCS_10(2, 0, sse2);
+         H264_QPEL_FUNCS_10(3, 0, sse2);
+         if (CONFIG_H264CHROMA) {
+             c->put_h264_chroma_pixels_tab[0] = ff_put_h264_chroma_mc8_10_avx;
+             c->avg_h264_chroma_pixels_tab[0] = ff_avg_h264_chroma_mc8_10_avx;
+         }
+     }
+     c->butterflies_float_interleave = ff_butterflies_float_interleave_avx;
+ #endif
+ }
+ void ff_dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
+ {
+     int mm_flags = av_get_cpu_flags();
      if (avctx->dsp_mask) {
          if (avctx->dsp_mask & AV_CPU_FLAG_FORCE)
              mm_flags |= (avctx->dsp_mask & 0xffff);
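
  [Editor's note: the hunk above splits the old monolithic initializer into per-ISA helpers (dsputil_init_mmx/_mmx2/_3dnow/_3dnow2/_sse/_sse2/_ssse3/_sse4/_avx). The tail of ff_dsputil_init_mmx() is cut off by the diff, but the split implies a CPU-flag dispatch along these lines; this is a hedged sketch, not the literal remainder of the hunk.]

      if (mm_flags & AV_CPU_FLAG_MMX)      dsputil_init_mmx   (c, avctx, mm_flags);
      if (mm_flags & AV_CPU_FLAG_MMX2)     dsputil_init_mmx2  (c, avctx, mm_flags);
      if (mm_flags & AV_CPU_FLAG_3DNOW)    dsputil_init_3dnow (c, avctx, mm_flags);
      if (mm_flags & AV_CPU_FLAG_3DNOWEXT) dsputil_init_3dnow2(c, avctx, mm_flags);
      if (mm_flags & AV_CPU_FLAG_SSE)      dsputil_init_sse   (c, avctx, mm_flags);
      if (mm_flags & AV_CPU_FLAG_SSE2)     dsputil_init_sse2  (c, avctx, mm_flags);
      if (mm_flags & AV_CPU_FLAG_SSSE3)    dsputil_init_ssse3 (c, avctx, mm_flags);
      if (mm_flags & AV_CPU_FLAG_SSE4)     dsputil_init_sse4  (c, avctx, mm_flags);
      if (mm_flags & AV_CPU_FLAG_AVX)      dsputil_init_avx   (c, avctx, mm_flags);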
Simple merge
Simple merge
Simple merge
@@@ -647,15 -634,12 +647,12 @@@ typedef struct AVStream 
          int64_t last_dts;
          int64_t duration_gcd;
          int duration_count;
 -        double duration_error[MAX_STD_TIMEBASES];
 +        double duration_error[2][2][MAX_STD_TIMEBASES];
          int64_t codec_info_duration;
          int nb_decoded_frames;
+         int found_decoder;
      } *info;
  
-     AVPacket cur_pkt;
-     const uint8_t *cur_ptr;
-     int cur_len;
      int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
  
      // Timestamp generation support:
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -752,17 -694,6 +752,17 @@@ int av_read_packet(AVFormatContext *s, 
  
  /**********************************************************/
  
-     if (avctx->codec_id == CODEC_ID_AAC ||
 +static int determinable_frame_size(AVCodecContext *avctx)
 +{
-         avctx->codec_id == CODEC_ID_MP2 ||
-         avctx->codec_id == CODEC_ID_MP3 ||
-         avctx->codec_id == CODEC_ID_CELT)
++    if (/*avctx->codec_id == CODEC_ID_AAC ||
 +        avctx->codec_id == CODEC_ID_MP1 ||
++        avctx->codec_id == CODEC_ID_MP2 ||*/
++        avctx->codec_id == CODEC_ID_MP3/* ||
++        avctx->codec_id == CODEC_ID_CELT*/)
 +        return 1;
 +    return 0;
 +}
 +
  /**
   * Get the number of samples of an audio frame. Return -1 on error.
   */
@@@ -860,11 -784,11 +855,20 @@@ static int is_intra_only(AVCodecContex
      return 0;
  }
  
++static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
++{
++    if (pktl->next)
++        return pktl->next;
++    if (pktl == s->parse_queue_end)
++        return s->packet_buffer;
++    return NULL;
++}
++
  static void update_initial_timestamps(AVFormatContext *s, int stream_index,
                                        int64_t dts, int64_t pts)
  {
      AVStream *st= s->streams[stream_index];
--    AVPacketList *pktl= s->packet_buffer;
++    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
  
      if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE)
          return;
      st->first_dts= dts - st->cur_dts;
      st->cur_dts= dts;
  
--    for(; pktl; pktl= pktl->next){
++    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
          if(pktl->pkt.stream_index != stream_index)
              continue;
          //FIXME think more about this check
          st->start_time = pts;
  }
  
- static void update_initial_durations(AVFormatContext *s, AVStream *st, AVPacket *pkt)
+ static void update_initial_durations(AVFormatContext *s, AVStream *st,
+                                      int stream_index, int duration)
  {
--    AVPacketList *pktl= s->packet_buffer;
++    AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
      int64_t cur_dts= 0;
  
      if(st->first_dts != AV_NOPTS_VALUE){
          cur_dts= st->first_dts;
--        for(; pktl; pktl= pktl->next){
-             if(pktl->pkt.stream_index == pkt->stream_index){
++        for(; pktl; pktl= get_next_pkt(s, st, pktl)){
+             if(pktl->pkt.stream_index == stream_index){
                  if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
                      break;
-                 cur_dts -= pkt->duration;
+                 cur_dts -= duration;
              }
          }
--        pktl= s->packet_buffer;
++        pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
          st->first_dts = cur_dts;
      }else if(st->cur_dts)
          return;
  
--    for(; pktl; pktl= pktl->next){
-         if(pktl->pkt.stream_index != pkt->stream_index)
++    for(; pktl; pktl= get_next_pkt(s, st, pktl)){
+         if(pktl->pkt.stream_index != stream_index)
              continue;
 -        if(pktl->pkt.pts == pktl->pkt.dts && pktl->pkt.dts == AV_NOPTS_VALUE
 +        if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
             && !pktl->pkt.duration){
              pktl->pkt.dts= cur_dts;
              if(!st->codec->has_b_frames)
                  pktl->pkt.pts= cur_dts;
-             pktl->pkt.duration= pkt->duration;
 -            cur_dts += duration;
 -            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
++//            if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
+                 pktl->pkt.duration = duration;
          }else
              break;
 +        cur_dts = pktl->pkt.dts + pktl->pkt.duration;
      }
 -    if(st->first_dts == AV_NOPTS_VALUE)
 +    if(!pktl)
          st->cur_dts= cur_dts;
  }
  
@@@ -967,10 -890,11 +973,10 @@@ static void compute_pkt_fields(AVFormat
          compute_frame_duration(&num, &den, st, pc, pkt);
          if (den && num) {
              pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
 -
 -            if(pkt->duration != 0 && s->packet_buffer)
 -                update_initial_durations(s, st, pkt->stream_index, pkt->duration);
          }
      }
-     if(pkt->duration != 0 && s->packet_buffer)
-         update_initial_durations(s, st, pkt);
++    if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
++        update_initial_durations(s, st, pkt->stream_index, pkt->duration);
  
      /* correct timestamps with byte offset if demuxers only have timestamps
         on packet boundaries */
              st->last_IP_pts= pkt->pts;
              /* cannot compute PTS if not present (we can compute it only
              by knowing the future */
-         } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
-             if(pkt->pts != AV_NOPTS_VALUE && pkt->duration){
-                 int64_t old_diff= FFABS(st->cur_dts - pkt->duration - pkt->pts);
+         } else if (pkt->pts != AV_NOPTS_VALUE ||
+                    pkt->dts != AV_NOPTS_VALUE ||
 -                   pkt->duration              ||
 -                   st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
++                   pkt->duration                ) {
+             int duration = pkt->duration;
 -            if (!duration && st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
 -                compute_frame_duration(&num, &den, st, pc, pkt);
 -                if (den && num) {
 -                    duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den,
 -                                                 den * (int64_t)st->time_base.num,
 -                                                 AV_ROUND_DOWN);
 -                    if (duration != 0 && s->packet_buffer) {
 -                        update_initial_durations(s, st, pkt->stream_index,
 -                                                 duration);
 -                    }
 -                }
 -            }
+             if(pkt->pts != AV_NOPTS_VALUE && duration){
+                 int64_t old_diff= FFABS(st->cur_dts - duration - pkt->pts);
                  int64_t new_diff= FFABS(st->cur_dts - pkt->pts);
-                 if(old_diff < new_diff && old_diff < (pkt->duration>>3)){
-                     pkt->pts += pkt->duration;
+                 if(old_diff < new_diff && old_diff < (duration>>3)){
+                     pkt->pts += duration;
      //                av_log(NULL, AV_LOG_DEBUG, "id:%d old:%"PRId64" new:%"PRId64" dur:%d cur:%"PRId64" size:%d\n", pkt->stream_index, old_diff, new_diff, pkt->duration, st->cur_dts, pkt->size);
                  }
              }
          pkt->convergence_duration = pc->convergence_duration;
  }
  
+ static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
+ {
+     while (*pkt_buf) {
+         AVPacketList *pktl = *pkt_buf;
+         *pkt_buf = pktl->next;
+         av_free_packet(&pktl->pkt);
+         av_freep(&pktl);
+     }
+     *pkt_buf_end = NULL;
+ }
  
- static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
+ /**
+  * Parse a packet, add all split parts to parse_queue
+  *
+  * @param pkt packet to parse, NULL when flushing the parser at end of stream
+  */
+ static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
  {
-     AVStream *st;
-     int len, ret, i;
+     AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
+     AVStream     *st = s->streams[stream_index];
+     uint8_t    *data = pkt ? pkt->data : NULL;
+     int         size = pkt ? pkt->size : 0;
+     int ret = 0, got_output = 0;
  
-     av_init_packet(pkt);
+     if (!pkt) {
+         av_init_packet(&flush_pkt);
+         pkt = &flush_pkt;
+         got_output = 1;
+     }
  
-     for(;;) {
-         /* select current input stream component */
-         st = s->cur_st;
-         if (st) {
-             if (!st->need_parsing || !st->parser) {
-                 /* no parsing needed: we just output the packet as is */
-                 /* raw data support */
-                 *pkt = st->cur_pkt;
-                 st->cur_pkt.data= NULL;
-                 st->cur_pkt.side_data_elems = 0;
-                 st->cur_pkt.side_data = NULL;
-                 compute_pkt_fields(s, st, NULL, pkt);
-                 s->cur_st = NULL;
-                 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
-                     (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
-                     ff_reduce_index(s, st->index);
-                     av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
-                 }
-                 break;
-             } else if (st->cur_len > 0 && st->discard < AVDISCARD_ALL) {
-                 len = av_parser_parse2(st->parser, st->codec, &pkt->data, &pkt->size,
-                                        st->cur_ptr, st->cur_len,
-                                        st->cur_pkt.pts, st->cur_pkt.dts,
-                                        st->cur_pkt.pos);
-                 st->cur_pkt.pts = AV_NOPTS_VALUE;
-                 st->cur_pkt.dts = AV_NOPTS_VALUE;
-                 /* increment read pointer */
-                 st->cur_ptr += len;
-                 st->cur_len -= len;
-                 /* return packet if any */
-                 if (pkt->size) {
-                 got_packet:
-                     pkt->duration = 0;
-                     if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
-                         if (st->codec->sample_rate > 0) {
-                             pkt->duration = av_rescale_q_rnd(st->parser->duration,
-                                                              (AVRational){ 1, st->codec->sample_rate },
-                                                              st->time_base,
-                                                              AV_ROUND_DOWN);
-                         }
-                     } else if (st->codec->time_base.num != 0 &&
-                                st->codec->time_base.den != 0) {
-                         pkt->duration = av_rescale_q_rnd(st->parser->duration,
-                                                          st->codec->time_base,
-                                                          st->time_base,
-                                                          AV_ROUND_DOWN);
-                     }
-                     pkt->stream_index = st->index;
-                     pkt->pts = st->parser->pts;
-                     pkt->dts = st->parser->dts;
-                     pkt->pos = st->parser->pos;
-                     if (st->parser->key_frame == 1 ||
-                         (st->parser->key_frame == -1 &&
-                          st->parser->pict_type == AV_PICTURE_TYPE_I))
-                         pkt->flags |= AV_PKT_FLAG_KEY;
-                     if(pkt->data == st->cur_pkt.data && pkt->size == st->cur_pkt.size){
-                         s->cur_st = NULL;
-                         pkt->destruct= st->cur_pkt.destruct;
-                         st->cur_pkt.destruct= NULL;
-                         st->cur_pkt.data    = NULL;
-                         assert(st->cur_len == 0);
-                     }else{
-                         pkt->destruct = NULL;
-                     }
-                     compute_pkt_fields(s, st, st->parser, pkt);
+     while (size > 0 || (pkt == &flush_pkt && got_output)) {
+         int len;
  
-                     if((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY){
-                         int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? pkt->pos : st->parser->frame_offset;
-                         ff_reduce_index(s, st->index);
-                         av_add_index_entry(st, pos, pkt->dts,
-                                            0, 0, AVINDEX_KEYFRAME);
-                     }
+         av_init_packet(&out_pkt);
+         len = av_parser_parse2(st->parser,  st->codec,
+                                &out_pkt.data, &out_pkt.size, data, size,
+                                pkt->pts, pkt->dts, pkt->pos);
  
-                     break;
-                 }
-             } else {
-                 /* free packet */
-                 av_free_packet(&st->cur_pkt);
-                 s->cur_st = NULL;
+         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
+         /* increment read pointer */
+         data += len;
+         size -= len;
+         got_output = !!out_pkt.size;
+         if (!out_pkt.size)
+             continue;
+         /* set the duration */
+         out_pkt.duration = 0;
+         if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
+             if (st->codec->sample_rate > 0) {
+                 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
+                                                     (AVRational){ 1, st->codec->sample_rate },
+                                                     st->time_base,
+                                                     AV_ROUND_DOWN);
              }
-         } else {
-             AVPacket cur_pkt;
-             /* read next packet */
-             ret = av_read_packet(s, &cur_pkt);
-             if (ret < 0) {
-                 if (ret == AVERROR(EAGAIN))
-                     return ret;
-                 /* return the last frames, if any */
-                 for(i = 0; i < s->nb_streams; i++) {
-                     st = s->streams[i];
-                     if (st->parser && st->need_parsing) {
-                         av_parser_parse2(st->parser, st->codec,
-                                         &pkt->data, &pkt->size,
-                                         NULL, 0,
-                                         AV_NOPTS_VALUE, AV_NOPTS_VALUE,
-                                         AV_NOPTS_VALUE);
-                         if (pkt->size)
-                             goto got_packet;
-                     }
-                 }
-                 /* no more packets: really terminate parsing */
+         } else if (st->codec->time_base.num != 0 &&
+                    st->codec->time_base.den != 0) {
+             out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
+                                                 st->codec->time_base,
+                                                 st->time_base,
+                                                 AV_ROUND_DOWN);
+         }
+         out_pkt.stream_index = st->index;
+         out_pkt.pts = st->parser->pts;
+         out_pkt.dts = st->parser->dts;
+         out_pkt.pos = st->parser->pos;
+         if (st->parser->key_frame == 1 ||
+             (st->parser->key_frame == -1 &&
+              st->parser->pict_type == AV_PICTURE_TYPE_I))
+             out_pkt.flags |= AV_PKT_FLAG_KEY;
+         compute_pkt_fields(s, st, st->parser, &out_pkt);
+         if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+             out_pkt.flags & AV_PKT_FLAG_KEY) {
++            int64_t pos= (st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) ? out_pkt.pos : st->parser->frame_offset;
+             ff_reduce_index(s, st->index);
 -            av_add_index_entry(st, st->parser->frame_offset, out_pkt.dts,
++            av_add_index_entry(st, pos, out_pkt.dts,
+                                0, 0, AVINDEX_KEYFRAME);
+         }
+         if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
+             out_pkt.destruct = pkt->destruct;
+             pkt->destruct = NULL;
+         }
+         if ((ret = av_dup_packet(&out_pkt)) < 0)
+             goto fail;
+         if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
+             av_free_packet(&out_pkt);
+             ret = AVERROR(ENOMEM);
+             goto fail;
+         }
+     }
+     /* end of the stream => close and free the parser */
+     if (pkt == &flush_pkt) {
+         av_parser_close(st->parser);
+         st->parser = NULL;
+     }
+ fail:
+     av_free_packet(pkt);
+     return ret;
+ }
+ static int read_from_packet_buffer(AVPacketList **pkt_buffer,
+                                    AVPacketList **pkt_buffer_end,
+                                    AVPacket      *pkt)
+ {
+     AVPacketList *pktl;
+     av_assert0(*pkt_buffer);
+     pktl = *pkt_buffer;
+     *pkt = pktl->pkt;
+     *pkt_buffer = pktl->next;
+     if (!pktl->next)
+         *pkt_buffer_end = NULL;
+     av_freep(&pktl);
+     return 0;
+ }
+ static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
+ {
+     int ret = 0, i, got_packet = 0;
+     av_init_packet(pkt);
+     while (!got_packet && !s->parse_queue) {
+         AVStream *st;
+         AVPacket cur_pkt;
+         /* read next packet */
+         ret = av_read_packet(s, &cur_pkt);
+         if (ret < 0) {
+             if (ret == AVERROR(EAGAIN))
                  return ret;
+             /* flush the parsers */
+             for(i = 0; i < s->nb_streams; i++) {
+                 st = s->streams[i];
+                 if (st->parser && st->need_parsing)
+                     parse_packet(s, NULL, st->index);
              }
-             st = s->streams[cur_pkt.stream_index];
-             st->cur_pkt= cur_pkt;
-             if(st->cur_pkt.pts != AV_NOPTS_VALUE &&
-                st->cur_pkt.dts != AV_NOPTS_VALUE &&
-                st->cur_pkt.pts < st->cur_pkt.dts){
-                 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
-                     st->cur_pkt.stream_index,
-                     st->cur_pkt.pts,
-                     st->cur_pkt.dts,
-                     st->cur_pkt.size);
- //                av_free_packet(&st->cur_pkt);
- //                return -1;
+             /* all remaining packets are now in parse_queue =>
+              * really terminate parsing */
+             break;
+         }
+         ret = 0;
+         st  = s->streams[cur_pkt.stream_index];
+         if (cur_pkt.pts != AV_NOPTS_VALUE &&
+             cur_pkt.dts != AV_NOPTS_VALUE &&
+             cur_pkt.pts < cur_pkt.dts) {
+             av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d\n",
+                    cur_pkt.stream_index,
+                    cur_pkt.pts,
+                    cur_pkt.dts,
+                    cur_pkt.size);
+         }
+         if (s->debug & FF_FDEBUG_TS)
+             av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
+                    cur_pkt.stream_index,
+                    cur_pkt.pts,
+                    cur_pkt.dts,
+                    cur_pkt.size,
+                    cur_pkt.duration,
+                    cur_pkt.flags);
+         if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
+             st->parser = av_parser_init(st->codec->codec_id);
+             if (!st->parser) {
++                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
++                       "%s, packets or times may be invalid.\n",
++                       avcodec_get_name(st->codec->codec_id));
+                 /* no parser available: just output the raw packets */
+                 st->need_parsing = AVSTREAM_PARSE_NONE;
+             } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
+                 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
+             } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
+                 st->parser->flags |= PARSER_FLAG_ONCE;
              }
+         }
  
-             if(s->debug & FF_FDEBUG_TS)
-                 av_log(s, AV_LOG_DEBUG, "av_read_packet stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
-                     st->cur_pkt.stream_index,
-                     st->cur_pkt.pts,
-                     st->cur_pkt.dts,
-                     st->cur_pkt.size,
-                     st->cur_pkt.duration,
-                     st->cur_pkt.flags);
-             s->cur_st = st;
-             st->cur_ptr = st->cur_pkt.data;
-             st->cur_len = st->cur_pkt.size;
-             if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
-                 st->parser = av_parser_init(st->codec->codec_id);
-                 if (!st->parser) {
-                     av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
-                            "%s, packets or times may be invalid.\n",
-                            avcodec_get_name(st->codec->codec_id));
-                     /* no parser available: just output the raw packets */
-                     st->need_parsing = AVSTREAM_PARSE_NONE;
-                 }else if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
-                     st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
-                 }else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE){
-                     st->parser->flags |= PARSER_FLAG_ONCE;
-                 }
+         if (!st->need_parsing || !st->parser) {
+             /* no parsing needed: we just output the packet as is */
+             *pkt = cur_pkt;
+             compute_pkt_fields(s, st, NULL, pkt);
+             if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
+                 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
+                 ff_reduce_index(s, st->index);
+                 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
              }
+             got_packet = 1;
+         } else if (st->discard < AVDISCARD_ALL) {
+             if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
+                 return ret;
+         } else {
+             /* free packet */
+             av_free_packet(&cur_pkt);
          }
      }
+     if (!got_packet && s->parse_queue)
+         ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
      if(s->debug & FF_FDEBUG_TS)
          av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%"PRId64", dts=%"PRId64", size=%d, duration=%d, flags=%d\n",
              pkt->stream_index,
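
  [Editor's note: the rewrite above replaces the per-stream cur_pkt/cur_ptr/cur_len parsing state with parse_packet() feeding a shared s->parse_queue, which read_frame_internal() then drains. The public demuxing loop is unchanged; a minimal caller sketch, where "fmt_ctx" and "process()" are hypothetical:]

      AVPacket pkt;
      while (av_read_frame(fmt_ctx, &pkt) >= 0) {
          process(&pkt);          /* hypothetical consumer */
          av_free_packet(&pkt);
      }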
@@@ -1365,15 -1333,10 +1406,11 @@@ void ff_read_frame_flush(AVFormatContex
          if (st->parser) {
              av_parser_close(st->parser);
              st->parser = NULL;
-             av_free_packet(&st->cur_pkt);
          }
          st->last_IP_pts = AV_NOPTS_VALUE;
 -        st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
 +        if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = 0;
 +        else                                st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
          st->reference_dts = AV_NOPTS_VALUE;
-         /* fail safe */
-         st->cur_ptr = NULL;
-         st->cur_len = 0;
  
          st->probe_packets = MAX_PROBE_PACKETS;
  
@@@ -2112,15 -2032,15 +2147,19 @@@ static int has_codec_parameters(AVStrea
      int val;
      switch (avctx->codec_type) {
      case AVMEDIA_TYPE_AUDIO:
-         val = avctx->sample_rate && avctx->channels && avctx->sample_fmt != AV_SAMPLE_FMT_NONE;
+         val = avctx->sample_rate && avctx->channels;
 +        if (!avctx->frame_size && determinable_frame_size(avctx))
 +            return 0;
+         if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
+             return 0;
          break;
      case AVMEDIA_TYPE_VIDEO:
-         val = avctx->width && avctx->pix_fmt != PIX_FMT_NONE;
+         val = avctx->width;
+         if (st->info->found_decoder >= 0 && avctx->pix_fmt == PIX_FMT_NONE)
+             return 0;
          break;
 +    case AVMEDIA_TYPE_DATA:
 +        if(avctx->codec_id == CODEC_ID_NONE) return 1;
      default:
          val = 1;
          break;
@@@ -2481,37 -2413,6 +2529,40 @@@ int avformat_find_stream_info(AVFormatC
          count++;
      }
  
-         int err;
 +    if (flush_codecs) {
 +        AVPacket empty_pkt = { 0 };
-             do {
-                 err = try_decode_frame(st, &empty_pkt,
-                                            (options && i < orig_nb_streams) ?
-                                            &options[i] : NULL);
-             } while (err > 0 && !has_codec_parameters(st->codec));
-             if (err < 0) {
-                 av_log(ic, AV_LOG_INFO,
-                        "decoding for stream %d failed\n", st->index);
++        int err = 0;
 +        av_init_packet(&empty_pkt);
 +
 +        ret = -1; /* we could not have all the codec parameters before EOF */
 +        for(i=0;i<ic->nb_streams;i++) {
 +            st = ic->streams[i];
 +
 +            /* flush the decoders */
-             if (!has_codec_parameters(st->codec)){
++            if (st->info->found_decoder == 1) {
++                do {
++                    err = try_decode_frame(st, &empty_pkt,
++                                            (options && i < orig_nb_streams) ?
++                                            &options[i] : NULL);
++                } while (err > 0 && !has_codec_parameters(st));
++
++                if (err < 0) {
++                    av_log(ic, AV_LOG_INFO,
++                        "decoding for stream %d failed\n", st->index);
++                }
 +            }
++
++            if (!has_codec_parameters(st)){
 +                char buf[256];
 +                avcodec_string(buf, sizeof(buf), st->codec, 0);
 +                av_log(ic, AV_LOG_WARNING,
 +                       "Could not find codec parameters (%s)\n", buf);
 +            } else {
 +                ret = 0;
 +            }
 +        }
 +    }
 +
      // close codecs which were opened in try_decode_frame()
      for(i=0;i<ic->nb_streams;i++) {
          st = ic->streams[i];
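
  [Editor's note: the flush_codecs block above drains decoders that still buffer frames at EOF by feeding them an empty packet until no more output appears. Outside of avformat_find_stream_info() the same draining convention looks roughly like this; "avctx" and "frame" are hypothetical and error handling is omitted.]

      AVPacket empty_pkt = { 0 };
      int got_frame;
      av_init_packet(&empty_pkt);              /* data = NULL, size = 0 */
      do {
          got_frame = 0;
          avcodec_decode_video2(avctx, frame, &got_frame, &empty_pkt);
      } while (got_frame);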