Merge commit '9df889a5f116c1ee78c2f239e0ba599c492431aa'
author Clément Bœsch <u@pkh.me>
Fri, 29 Jul 2016 09:01:36 +0000 (11:01 +0200)
committer Clément Bœsch <u@pkh.me>
Fri, 29 Jul 2016 09:01:36 +0000 (11:01 +0200)
* commit '9df889a5f116c1ee78c2f239e0ba599c492431aa':
  h264: rename h264.[ch] to h264dec.[ch]

Merged-by: Clément Bœsch <u@pkh.me>
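
For orientation, the rename only moves the decoder's internal header; for each of the files listed below the visible change is the include line. A minimal before/after sketch (hypothetical consumer of the header, not taken verbatim from the diff):

    /* before this merge */
    #include "h264.h"

    /* after this merge */
    #include "h264dec.h"
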
39 files changed:
libavcodec/Makefile
libavcodec/crystalhd.c
libavcodec/dxva2_h264.c
libavcodec/h264_cabac.c
libavcodec/h264_cavlc.c
libavcodec/h264_direct.c
libavcodec/h264_loopfilter.c
libavcodec/h264_mb.c
libavcodec/h264_mc_template.c
libavcodec/h264_mvpred.h
libavcodec/h264_parse.c
libavcodec/h264_parser.c
libavcodec/h264_picture.c
libavcodec/h264_ps.c
libavcodec/h264_refs.c
libavcodec/h264_sei.c
libavcodec/h264_slice.c
libavcodec/h264data.c
libavcodec/h264data.h
libavcodec/h264dec.c
libavcodec/h264dec.h
libavcodec/h264idct_template.c
libavcodec/mediacodecdec_h264.c
libavcodec/mips/h264chroma_mips.h
libavcodec/mips/h264dsp_mips.h
libavcodec/omx.c
libavcodec/ppc/h264dsp.c
libavcodec/qsvenc_h264.c
libavcodec/svq3.c
libavcodec/vaapi_encode_h264.c
libavcodec/vaapi_h264.c
libavcodec/vda_h264.c
libavcodec/vda_h264_dec.c
libavcodec/vdpau.c
libavcodec/vdpau_compat.h
libavcodec/vdpau_h264.c
libavcodec/videotoolbox.c
libavcodec/x86/h264_qpel.c
libavformat/mxfenc.c

Simple merge
index 3cb32a8,0000000..d6ebcee
mode 100644,000000..100644
--- /dev/null
@@@ -1,1226 -1,0 +1,1226 @@@
- #include "h264.h"
 +/*
 + * - CrystalHD decoder module -
 + *
 + * Copyright(C) 2010,2011 Philip Langdale <ffmpeg.philipl@overt.org>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/*
 + * - Principles of Operation -
 + *
 + * The CrystalHD decoder operates at the bitstream level - which is an even
 + * higher level than the decoding hardware you typically see in modern GPUs.
 + * This means it has a very simple interface, in principle. You feed demuxed
 + * packets in one end and get decoded picture (fields/frames) out the other.
 + *
 + * Of course, nothing is ever that simple. Due, at the very least, to b-frame
 + * dependencies in the supported formats, the hardware has a delay between
 + * when a packet goes in, and when a picture comes out. Furthermore, this delay
 + * is not just a function of time, but also depends on additional frames being
 + * fed into the decoder to satisfy the b-frame dependencies.
 + *
 + * As such, a pipeline will build up that is roughly equivalent to the required
 + * DPB for the file being played. If that was all it took, things would still
 + * be simple - so, of course, it isn't.
 + *
 + * The hardware has a way of indicating that a picture is ready to be copied out,
 + * but this is unreliable - and sometimes the attempt will still fail. So, based
 + * on testing, the code waits until 3 pictures are ready before starting
 + * to copy out - which has the effect of extending the pipeline.
 + *
 + * Finally, while it is tempting to say that once the decoder starts outputting
 + * frames, the software should never fail to return a frame from a decode(),
 + * this is a hard assertion to make, because the stream may switch between
 + * differently encoded content (number of b-frames, interlacing, etc) which
 + * might require a longer pipeline than before. If that happened, you could
 + * deadlock trying to retrieve a frame that can't be decoded without feeding
 + * in additional packets.
 + *
 + * As such, the code will return in the event that a picture cannot be copied
 + * out, leading to an increase in the length of the pipeline. This, in turn,
 + * means we have to be sensitive to the time it takes to decode a picture;
 + * We do not want to give up just because the hardware needed a little more
 + * time to prepare the picture! For this reason, there are delays included
 + * in the decode() path that ensure that, under normal conditions, the hardware
 + * will only fail to return a frame if it really needs additional packets to
 + * complete the decoding.
 + *
 + * Finally, to be explicit, we do not want the pipeline to grow without bound
 + * for two reasons: 1) The hardware can only buffer a finite number of packets,
 + * and 2) The client application may not be able to cope with arbitrarily long
 + * delays in the video path relative to the audio path. For example, MPlayer
 + * can only handle a 20 picture delay (although this is arbitrary, and needs
 + * to be extended to fully support the CrystalHD where the delay could be up
 + * to 32 pictures - consider PAFF H.264 content with 16 b-frames).
 + */
 +
 +/*****************************************************************************
 + * Includes
 + ****************************************************************************/
 +
 +#define _XOPEN_SOURCE 600
 +#include <inttypes.h>
 +#include <stdio.h>
 +#include <stdlib.h>
 +
 +#include <libcrystalhd/bc_dts_types.h>
 +#include <libcrystalhd/bc_dts_defs.h>
 +#include <libcrystalhd/libcrystalhd_if.h>
 +
 +#include "avcodec.h"
++#include "h264dec.h"
 +#include "internal.h"
 +#include "libavutil/imgutils.h"
 +#include "libavutil/intreadwrite.h"
 +#include "libavutil/opt.h"
 +
 +#if HAVE_UNISTD_H
 +#include <unistd.h>
 +#endif
 +
 +/** Timeout parameter passed to DtsProcOutput() in us */
 +#define OUTPUT_PROC_TIMEOUT 50
 +/** Step between fake timestamps passed to hardware in units of 100ns */
 +#define TIMESTAMP_UNIT 100000
 +/** Initial value in us of the wait in decode() */
 +#define BASE_WAIT 10000
 +/** Increment in us to adjust wait in decode() */
 +#define WAIT_UNIT 1000
 +
 +
 +/*****************************************************************************
 + * Module private data
 + ****************************************************************************/
 +
 +typedef enum {
 +    RET_ERROR           = -1,
 +    RET_OK              = 0,
 +    RET_COPY_AGAIN      = 1,
 +    RET_SKIP_NEXT_COPY  = 2,
 +    RET_COPY_NEXT_FIELD = 3,
 +} CopyRet;
 +
 +typedef struct OpaqueList {
 +    struct OpaqueList *next;
 +    uint64_t fake_timestamp;
 +    uint64_t reordered_opaque;
 +    uint8_t pic_type;
 +} OpaqueList;
 +
 +typedef struct {
 +    AVClass *av_class;
 +    AVCodecContext *avctx;
 +    AVFrame *pic;
 +    HANDLE dev;
 +
 +    uint8_t *orig_extradata;
 +    uint32_t orig_extradata_size;
 +
 +    AVBitStreamFilterContext *bsfc;
 +    AVCodecParserContext *parser;
 +
 +    uint8_t is_70012;
 +    uint8_t *sps_pps_buf;
 +    uint32_t sps_pps_size;
 +    uint8_t is_nal;
 +    uint8_t output_ready;
 +    uint8_t need_second_field;
 +    uint8_t skip_next_output;
 +    uint64_t decode_wait;
 +
 +    uint64_t last_picture;
 +
 +    OpaqueList *head;
 +    OpaqueList *tail;
 +
 +    /* Options */
 +    uint32_t sWidth;
 +    uint8_t bframe_bug;
 +} CHDContext;
 +
 +static const AVOption options[] = {
 +    { "crystalhd_downscale_width",
 +      "Turn on downscaling to the specified width",
 +      offsetof(CHDContext, sWidth),
 +      AV_OPT_TYPE_INT, {.i64 = 0}, 0, UINT32_MAX,
 +      AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM, },
 +    { NULL, },
 +};
 +
 +
 +/*****************************************************************************
 + * Helper functions
 + ****************************************************************************/
 +
 +static inline BC_MEDIA_SUBTYPE id2subtype(CHDContext *priv, enum AVCodecID id)
 +{
 +    switch (id) {
 +    case AV_CODEC_ID_MPEG4:
 +        return BC_MSUBTYPE_DIVX;
 +    case AV_CODEC_ID_MSMPEG4V3:
 +        return BC_MSUBTYPE_DIVX311;
 +    case AV_CODEC_ID_MPEG2VIDEO:
 +        return BC_MSUBTYPE_MPEG2VIDEO;
 +    case AV_CODEC_ID_VC1:
 +        return BC_MSUBTYPE_VC1;
 +    case AV_CODEC_ID_WMV3:
 +        return BC_MSUBTYPE_WMV3;
 +    case AV_CODEC_ID_H264:
 +        return priv->is_nal ? BC_MSUBTYPE_AVC1 : BC_MSUBTYPE_H264;
 +    default:
 +        return BC_MSUBTYPE_INVALID;
 +    }
 +}
 +
 +static inline void print_frame_info(CHDContext *priv, BC_DTS_PROC_OUT *output)
 +{
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffSz: %u\n", output->YbuffSz);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tYBuffDoneSz: %u\n",
 +           output->YBuffDoneSz);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tUVBuffDoneSz: %u\n",
 +           output->UVBuffDoneSz);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tTimestamp: %"PRIu64"\n",
 +           output->PicInfo.timeStamp);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tPicture Number: %u\n",
 +           output->PicInfo.picture_number);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tWidth: %u\n",
 +           output->PicInfo.width);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tHeight: %u\n",
 +           output->PicInfo.height);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tChroma: 0x%03x\n",
 +           output->PicInfo.chroma_format);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tPulldown: %u\n",
 +           output->PicInfo.pulldown);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFlags: 0x%08x\n",
 +           output->PicInfo.flags);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrame Rate/Res: %u\n",
 +           output->PicInfo.frame_rate);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tAspect Ratio: %u\n",
 +           output->PicInfo.aspect_ratio);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tColor Primaries: %u\n",
 +           output->PicInfo.colour_primaries);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tMetaData: %u\n",
 +           output->PicInfo.picture_meta_payload);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tSession Number: %u\n",
 +           output->PicInfo.sess_num);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tycom: %u\n",
 +           output->PicInfo.ycom);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tCustom Aspect: %u\n",
 +           output->PicInfo.custom_aspect_ratio_width_height);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tFrames to Drop: %u\n",
 +           output->PicInfo.n_drop);
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "\tH264 Valid Fields: 0x%08x\n",
 +           output->PicInfo.other.h264.valid);
 +}
 +
 +
 +/*****************************************************************************
 + * OpaqueList functions
 + ****************************************************************************/
 +
 +static uint64_t opaque_list_push(CHDContext *priv, uint64_t reordered_opaque,
 +                                 uint8_t pic_type)
 +{
 +    OpaqueList *newNode = av_mallocz(sizeof (OpaqueList));
 +    if (!newNode) {
 +        av_log(priv->avctx, AV_LOG_ERROR,
 +               "Unable to allocate new node in OpaqueList.\n");
 +        return 0;
 +    }
 +    if (!priv->head) {
 +        newNode->fake_timestamp = TIMESTAMP_UNIT;
 +        priv->head              = newNode;
 +    } else {
 +        newNode->fake_timestamp = priv->tail->fake_timestamp + TIMESTAMP_UNIT;
 +        priv->tail->next        = newNode;
 +    }
 +    priv->tail = newNode;
 +    newNode->reordered_opaque = reordered_opaque;
 +    newNode->pic_type = pic_type;
 +
 +    return newNode->fake_timestamp;
 +}
 +
 +/*
 + * The OpaqueList is built in decode order, while elements will be removed
 + * in presentation order. If frames are reordered, this means we must be
 + * able to remove elements that are not the first element.
 + *
 + * Returned node must be freed by caller.
 + */
 +static OpaqueList *opaque_list_pop(CHDContext *priv, uint64_t fake_timestamp)
 +{
 +    OpaqueList *node = priv->head;
 +
 +    if (!priv->head) {
 +        av_log(priv->avctx, AV_LOG_ERROR,
 +               "CrystalHD: Attempted to query non-existent timestamps.\n");
 +        return NULL;
 +    }
 +
 +    /*
 +     * The first element is special-cased because we have to manipulate
 +     * the head pointer rather than the previous element in the list.
 +     */
 +    if (priv->head->fake_timestamp == fake_timestamp) {
 +        priv->head = node->next;
 +
 +        if (!priv->head->next)
 +            priv->tail = priv->head;
 +
 +        node->next = NULL;
 +        return node;
 +    }
 +
 +    /*
 +     * The list is processed at arm's length so that we have the
 +     * previous element available to rewrite its next pointer.
 +     */
 +    while (node->next) {
 +        OpaqueList *current = node->next;
 +        if (current->fake_timestamp == fake_timestamp) {
 +            node->next = current->next;
 +
 +            if (!node->next)
 +               priv->tail = node;
 +
 +            current->next = NULL;
 +            return current;
 +        } else {
 +            node = current;
 +        }
 +    }
 +
 +    av_log(priv->avctx, AV_LOG_VERBOSE,
 +           "CrystalHD: Couldn't match fake_timestamp.\n");
 +    return NULL;
 +}
 +
 +
 +/*****************************************************************************
 + * Video decoder API function definitions
 + ****************************************************************************/
 +
 +static void flush(AVCodecContext *avctx)
 +{
 +    CHDContext *priv = avctx->priv_data;
 +
 +    avctx->has_b_frames     = 0;
 +    priv->last_picture      = -1;
 +    priv->output_ready      = 0;
 +    priv->need_second_field = 0;
 +    priv->skip_next_output  = 0;
 +    priv->decode_wait       = BASE_WAIT;
 +
 +    av_frame_unref (priv->pic);
 +
 +    /* Flush mode 4 flushes all software and hardware buffers. */
 +    DtsFlushInput(priv->dev, 4);
 +}
 +
 +
 +static av_cold int uninit(AVCodecContext *avctx)
 +{
 +    CHDContext *priv = avctx->priv_data;
 +    HANDLE device;
 +
 +    device = priv->dev;
 +    DtsStopDecoder(device);
 +    DtsCloseDecoder(device);
 +    DtsDeviceClose(device);
 +
 +    /*
 +     * Restore original extradata, so that if the decoder is
 +     * reinitialised, the bitstream detection and filtering
 +     * will work as expected.
 +     */
 +    if (priv->orig_extradata) {
 +        av_free(avctx->extradata);
 +        avctx->extradata = priv->orig_extradata;
 +        avctx->extradata_size = priv->orig_extradata_size;
 +        priv->orig_extradata = NULL;
 +        priv->orig_extradata_size = 0;
 +    }
 +
 +    av_parser_close(priv->parser);
 +    if (priv->bsfc) {
 +        av_bitstream_filter_close(priv->bsfc);
 +    }
 +
 +    av_freep(&priv->sps_pps_buf);
 +
 +    av_frame_free (&priv->pic);
 +
 +    if (priv->head) {
 +       OpaqueList *node = priv->head;
 +       while (node) {
 +          OpaqueList *next = node->next;
 +          av_free(node);
 +          node = next;
 +       }
 +    }
 +
 +    return 0;
 +}
 +
 +
 +static av_cold int init(AVCodecContext *avctx)
 +{
 +    CHDContext* priv;
 +    BC_STATUS ret;
 +    BC_INFO_CRYSTAL version;
 +    BC_INPUT_FORMAT format = {
 +        .FGTEnable   = FALSE,
 +        .Progressive = TRUE,
 +        .OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
 +        .width       = avctx->width,
 +        .height      = avctx->height,
 +    };
 +
 +    BC_MEDIA_SUBTYPE subtype;
 +
 +    uint32_t mode = DTS_PLAYBACK_MODE |
 +                    DTS_LOAD_FILE_PLAY_FW |
 +                    DTS_SKIP_TX_CHK_CPB |
 +                    DTS_PLAYBACK_DROP_RPT_MODE |
 +                    DTS_SINGLE_THREADED_MODE |
 +                    DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);
 +
 +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
 +           avctx->codec->name);
 +
 +    avctx->pix_fmt = AV_PIX_FMT_YUYV422;
 +
 +    /* Initialize the library */
 +    priv               = avctx->priv_data;
 +    priv->avctx        = avctx;
 +    priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
 +    priv->last_picture = -1;
 +    priv->decode_wait  = BASE_WAIT;
 +    priv->pic          = av_frame_alloc();
 +
 +    subtype = id2subtype(priv, avctx->codec->id);
 +    switch (subtype) {
 +    case BC_MSUBTYPE_AVC1:
 +        {
 +            uint8_t *dummy_p;
 +            int dummy_int;
 +
 +            /* Back up the extradata so it can be restored at close time. */
 +            priv->orig_extradata = av_malloc(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
 +            if (!priv->orig_extradata) {
 +                av_log(avctx, AV_LOG_ERROR,
 +                       "Failed to allocate copy of extradata\n");
 +                return AVERROR(ENOMEM);
 +            }
 +            priv->orig_extradata_size = avctx->extradata_size;
 +            memcpy(priv->orig_extradata, avctx->extradata, avctx->extradata_size);
 +
 +            priv->bsfc = av_bitstream_filter_init("h264_mp4toannexb");
 +            if (!priv->bsfc) {
 +                av_log(avctx, AV_LOG_ERROR,
 +                       "Cannot open the h264_mp4toannexb BSF!\n");
 +                return AVERROR_BSF_NOT_FOUND;
 +            }
 +            av_bitstream_filter_filter(priv->bsfc, avctx, NULL, &dummy_p,
 +                                       &dummy_int, NULL, 0, 0);
 +        }
 +        subtype = BC_MSUBTYPE_H264;
 +        // Fall-through
 +    case BC_MSUBTYPE_H264:
 +        format.startCodeSz = 4;
 +        // Fall-through
 +    case BC_MSUBTYPE_VC1:
 +    case BC_MSUBTYPE_WVC1:
 +    case BC_MSUBTYPE_WMV3:
 +    case BC_MSUBTYPE_WMVA:
 +    case BC_MSUBTYPE_MPEG2VIDEO:
 +    case BC_MSUBTYPE_DIVX:
 +    case BC_MSUBTYPE_DIVX311:
 +        format.pMetaData  = avctx->extradata;
 +        format.metaDataSz = avctx->extradata_size;
 +        break;
 +    default:
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
 +        return AVERROR(EINVAL);
 +    }
 +    format.mSubtype = subtype;
 +
 +    if (priv->sWidth) {
 +        format.bEnableScaling = 1;
 +        format.ScalingParams.sWidth = priv->sWidth;
 +    }
 +
 +    /* Get a decoder instance */
 +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
 +    // Initialize the Link and Decoder devices
 +    ret = DtsDeviceOpen(&priv->dev, mode);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
 +        goto fail;
 +    }
 +
 +    ret = DtsCrystalHDVersion(priv->dev, &version);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_VERBOSE,
 +               "CrystalHD: DtsCrystalHDVersion failed\n");
 +        goto fail;
 +    }
 +    priv->is_70012 = version.device == 0;
 +
 +    if (priv->is_70012 &&
 +        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
 +        av_log(avctx, AV_LOG_VERBOSE,
 +               "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
 +        goto fail;
 +    }
 +
 +    ret = DtsSetInputFormat(priv->dev, &format);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
 +        goto fail;
 +    }
 +
 +    ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
 +        goto fail;
 +    }
 +
 +    ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
 +        goto fail;
 +    }
 +    ret = DtsStartDecoder(priv->dev);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
 +        goto fail;
 +    }
 +    ret = DtsStartCapture(priv->dev);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
 +        goto fail;
 +    }
 +
 +    if (avctx->codec->id == AV_CODEC_ID_H264) {
 +        priv->parser = av_parser_init(avctx->codec->id);
 +        if (!priv->parser)
 +            av_log(avctx, AV_LOG_WARNING,
 +                   "Cannot open the h.264 parser! Interlaced h.264 content "
 +                   "will not be detected reliably.\n");
 +        priv->parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
 +    }
 +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");
 +
 +    return 0;
 +
 + fail:
 +    uninit(avctx);
 +    return -1;
 +}
 +
 +
 +static inline CopyRet copy_frame(AVCodecContext *avctx,
 +                                 BC_DTS_PROC_OUT *output,
 +                                 void *data, int *got_frame)
 +{
 +    BC_STATUS ret;
 +    BC_DTS_STATUS decoder_status = { 0, };
 +    uint8_t trust_interlaced;
 +    uint8_t interlaced;
 +
 +    CHDContext *priv = avctx->priv_data;
 +    int64_t pkt_pts  = AV_NOPTS_VALUE;
 +    uint8_t pic_type = 0;
 +
 +    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
 +                           VDEC_FLAG_BOTTOMFIELD;
 +    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);
 +
 +    int width    = output->PicInfo.width;
 +    int height   = output->PicInfo.height;
 +    int bwidth;
 +    uint8_t *src = output->Ybuff;
 +    int sStride;
 +    uint8_t *dst;
 +    int dStride;
 +
 +    if (output->PicInfo.timeStamp != 0) {
 +        OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
 +        if (node) {
 +            pkt_pts = node->reordered_opaque;
 +            pic_type = node->pic_type;
 +            av_free(node);
 +        } else {
 +            /*
 +             * We will encounter a situation where a timestamp cannot be
 +             * popped if a second field is being returned. In this case,
 +             * each field has the same timestamp and the first one will
 +             * cause it to be popped. To keep subsequent calculations
 +             * simple, pic_type should be set to a FIELD value - doesn't
 +             * matter which, but I chose BOTTOM.
 +             */
 +            pic_type = PICT_BOTTOM_FIELD;
 +        }
 +        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
 +               output->PicInfo.timeStamp);
 +        av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
 +               pic_type);
 +    }
 +
 +    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR,
 +               "CrystalHD: GetDriverStatus failed: %u\n", ret);
 +       return RET_ERROR;
 +    }
 +
 +    /*
 +     * For most content, we can trust the interlaced flag returned
 +     * by the hardware, but sometimes we can't. These are the
 +     * conditions under which we can trust the flag:
 +     *
 +     * 1) It's not h.264 content
 +     * 2) The UNKNOWN_SRC flag is not set
 +     * 3) We know we're expecting a second field
 +     * 4) The hardware reports this picture and the next picture
 +     *    have the same picture number.
 +     *
 +     * Note that there can still be interlaced content that will
 +     * fail this check, if the hardware hasn't decoded the next
 +     * picture or if there is a corruption in the stream. (In either
 +     * case a 0 will be returned for the next picture number)
 +     */
 +    trust_interlaced = avctx->codec->id != AV_CODEC_ID_H264 ||
 +                       !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
 +                       priv->need_second_field ||
 +                       (decoder_status.picNumFlags & ~0x40000000) ==
 +                       output->PicInfo.picture_number;
 +
 +    /*
 +     * If we got a false negative for trust_interlaced on the first field,
 +     * we will realise our mistake here when we see that the picture number is that
 +     * of the previous picture. We cannot recover the frame and should discard the
 +     * second field to keep the correct number of output frames.
 +     */
 +    if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
 +        av_log(avctx, AV_LOG_WARNING,
 +               "Incorrectly guessed progressive frame. Discarding second field\n");
 +        /* Returning without providing a picture. */
 +        return RET_OK;
 +    }
 +
 +    interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
 +                 trust_interlaced;
 +
 +    if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
 +        av_log(avctx, AV_LOG_VERBOSE,
 +               "Next picture number unknown. Assuming progressive frame.\n");
 +    }
 +
 +    av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
 +           interlaced, trust_interlaced);
 +
 +    if (priv->pic->data[0] && !priv->need_second_field)
 +        av_frame_unref(priv->pic);
 +
 +    priv->need_second_field = interlaced && !priv->need_second_field;
 +
 +    if (!priv->pic->data[0]) {
 +        if (ff_get_buffer(avctx, priv->pic, AV_GET_BUFFER_FLAG_REF) < 0)
 +            return RET_ERROR;
 +    }
 +
 +    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
 +    if (priv->is_70012) {
 +        int pStride;
 +
 +        if (width <= 720)
 +            pStride = 720;
 +        else if (width <= 1280)
 +            pStride = 1280;
 +        else pStride = 1920;
 +        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
 +    } else {
 +        sStride = bwidth;
 +    }
 +
 +    dStride = priv->pic->linesize[0];
 +    dst     = priv->pic->data[0];
 +
 +    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");
 +
 +    if (interlaced) {
 +        int dY = 0;
 +        int sY = 0;
 +
 +        height /= 2;
 +        if (bottom_field) {
 +            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
 +            dY = 1;
 +        } else {
 +            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
 +            dY = 0;
 +        }
 +
 +        for (sY = 0; sY < height; dY++, sY++) {
 +            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
 +            dY++;
 +        }
 +    } else {
 +        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
 +    }
 +
 +    priv->pic->interlaced_frame = interlaced;
 +    if (interlaced)
 +        priv->pic->top_field_first = !bottom_first;
 +
 +    priv->pic->pkt_pts = pkt_pts;
 +
 +    if (!priv->need_second_field) {
 +        *got_frame       = 1;
 +        if ((ret = av_frame_ref(data, priv->pic)) < 0) {
 +            return ret;
 +        }
 +    }
 +
 +    /*
 +     * Two types of PAFF content have been observed. One form causes the
 +     * hardware to return a field pair and the other individual fields,
 +     * even though the input is always individual fields. We must skip
 +     * copying on the next decode() call to maintain pipeline length in
 +     * the first case.
 +     */
 +    if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
 +        (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
 +        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
 +        return RET_SKIP_NEXT_COPY;
 +    }
 +
 +    /*
 +     * The logic here is purely based on empirical testing with samples.
 +     * If we need a second field, it could come from a second input packet,
 +     * or it could come from the same field-pair input packet at the current
 +     * field. In the first case, we should return and wait for the next time
 +     * round to get the second field, while in the second case, we should
 +     * ask the decoder for it immediately.
 +     *
 +     * Testing has shown that we are dealing with the fieldpair -> two fields
 +     * case if the VDEC_FLAG_UNKNOWN_SRC is not set or if the input picture
 +     * type was PICT_FRAME (in this second case, the flag might still be set)
 +     */
 +    return priv->need_second_field &&
 +           (!(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
 +            pic_type == PICT_FRAME) ?
 +           RET_COPY_NEXT_FIELD : RET_OK;
 +}
 +
 +
 +static inline CopyRet receive_frame(AVCodecContext *avctx,
 +                                    void *data, int *got_frame)
 +{
 +    BC_STATUS ret;
 +    BC_DTS_PROC_OUT output = {
 +        .PicInfo.width  = avctx->width,
 +        .PicInfo.height = avctx->height,
 +    };
 +    CHDContext *priv = avctx->priv_data;
 +    HANDLE dev       = priv->dev;
 +
 +    *got_frame = 0;
 +
 +    // Request decoded data from the driver
 +    ret = DtsProcOutputNoCopy(dev, OUTPUT_PROC_TIMEOUT, &output);
 +    if (ret == BC_STS_FMT_CHANGE) {
 +        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Initial format change\n");
 +        avctx->width  = output.PicInfo.width;
 +        avctx->height = output.PicInfo.height;
 +        switch ( output.PicInfo.aspect_ratio ) {
 +        case vdecAspectRatioSquare:
 +            avctx->sample_aspect_ratio = (AVRational) {  1,  1};
 +            break;
 +        case vdecAspectRatio12_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 12, 11};
 +            break;
 +        case vdecAspectRatio10_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 10, 11};
 +            break;
 +        case vdecAspectRatio16_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 16, 11};
 +            break;
 +        case vdecAspectRatio40_33:
 +            avctx->sample_aspect_ratio = (AVRational) { 40, 33};
 +            break;
 +        case vdecAspectRatio24_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 24, 11};
 +            break;
 +        case vdecAspectRatio20_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 20, 11};
 +            break;
 +        case vdecAspectRatio32_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 32, 11};
 +            break;
 +        case vdecAspectRatio80_33:
 +            avctx->sample_aspect_ratio = (AVRational) { 80, 33};
 +            break;
 +        case vdecAspectRatio18_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 18, 11};
 +            break;
 +        case vdecAspectRatio15_11:
 +            avctx->sample_aspect_ratio = (AVRational) { 15, 11};
 +            break;
 +        case vdecAspectRatio64_33:
 +            avctx->sample_aspect_ratio = (AVRational) { 64, 33};
 +            break;
 +        case vdecAspectRatio160_99:
 +            avctx->sample_aspect_ratio = (AVRational) {160, 99};
 +            break;
 +        case vdecAspectRatio4_3:
 +            avctx->sample_aspect_ratio = (AVRational) {  4,  3};
 +            break;
 +        case vdecAspectRatio16_9:
 +            avctx->sample_aspect_ratio = (AVRational) { 16,  9};
 +            break;
 +        case vdecAspectRatio221_1:
 +            avctx->sample_aspect_ratio = (AVRational) {221,  1};
 +            break;
 +        }
 +        return RET_COPY_AGAIN;
 +    } else if (ret == BC_STS_SUCCESS) {
 +        int copy_ret = -1;
 +        if (output.PoutFlags & BC_POUT_FLAGS_PIB_VALID) {
 +            if (priv->last_picture == -1) {
 +                /*
 +                 * Init to one less, so that the incrementing code doesn't
 +                 * need to be special-cased.
 +                 */
 +                priv->last_picture = output.PicInfo.picture_number - 1;
 +            }
 +
 +            if (avctx->codec->id == AV_CODEC_ID_MPEG4 &&
 +                output.PicInfo.timeStamp == 0 && priv->bframe_bug) {
 +                av_log(avctx, AV_LOG_VERBOSE,
 +                       "CrystalHD: Not returning packed frame twice.\n");
 +                priv->last_picture++;
 +                DtsReleaseOutputBuffs(dev, NULL, FALSE);
 +                return RET_COPY_AGAIN;
 +            }
 +
 +            print_frame_info(priv, &output);
 +
 +            if (priv->last_picture + 1 < output.PicInfo.picture_number) {
 +                av_log(avctx, AV_LOG_WARNING,
 +                       "CrystalHD: Picture Number discontinuity\n");
 +                /*
 +                 * Have we lost frames? If so, we need to shrink the
 +                 * pipeline length appropriately.
 +                 *
 +                 * XXX: I have no idea what the semantics of this situation
 +                 * are so I don't even know if we've lost frames or which
 +                 * ones.
 +                 *
 +                 * In any case, only warn the first time.
 +                 */
 +               priv->last_picture = output.PicInfo.picture_number - 1;
 +            }
 +
 +            copy_ret = copy_frame(avctx, &output, data, got_frame);
 +            if (*got_frame > 0) {
 +                avctx->has_b_frames--;
 +                priv->last_picture++;
 +                av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Pipeline length: %u\n",
 +                       avctx->has_b_frames);
 +            }
 +        } else {
 +            /*
 +             * An invalid frame has been consumed.
 +             */
 +            av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput succeeded with "
 +                                        "invalid PIB\n");
 +            avctx->has_b_frames--;
 +            copy_ret = RET_OK;
 +        }
 +        DtsReleaseOutputBuffs(dev, NULL, FALSE);
 +
 +        return copy_ret;
 +    } else if (ret == BC_STS_BUSY) {
 +        return RET_COPY_AGAIN;
 +    } else {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: ProcOutput failed %d\n", ret);
 +        return RET_ERROR;
 +    }
 +}
 +
 +
 +static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
 +{
 +    BC_STATUS ret;
 +    BC_DTS_STATUS decoder_status = { 0, };
 +    CopyRet rec_ret;
 +    CHDContext *priv   = avctx->priv_data;
 +    HANDLE dev         = priv->dev;
 +    uint8_t *in_data   = avpkt->data;
 +    int len            = avpkt->size;
 +    int free_data      = 0;
 +    uint8_t pic_type   = 0;
 +
 +    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: decode_frame\n");
 +
 +    if (avpkt->size == 7 && !priv->bframe_bug) {
 +        /*
 +         * The use of a drop frame triggers the bug
 +         */
 +        av_log(avctx, AV_LOG_INFO,
 +               "CrystalHD: Enabling work-around for packed b-frame bug\n");
 +        priv->bframe_bug = 1;
 +    } else if (avpkt->size == 8 && priv->bframe_bug) {
 +        /*
 +         * Delay frames don't trigger the bug
 +         */
 +        av_log(avctx, AV_LOG_INFO,
 +               "CrystalHD: Disabling work-around for packed b-frame bug\n");
 +        priv->bframe_bug = 0;
 +    }
 +
 +    if (len) {
 +        int32_t tx_free = (int32_t)DtsTxFreeSize(dev);
 +
 +        if (priv->parser) {
 +            int ret = 0;
 +
 +            if (priv->bsfc) {
 +                ret = av_bitstream_filter_filter(priv->bsfc, avctx, NULL,
 +                                                 &in_data, &len,
 +                                                 avpkt->data, len, 0);
 +            }
 +            free_data = ret > 0;
 +
 +            if (ret >= 0) {
 +                uint8_t *pout;
 +                int psize;
 +                int index;
 +                H264Context *h = priv->parser->priv_data;
 +
 +                index = av_parser_parse2(priv->parser, avctx, &pout, &psize,
 +                                         in_data, len, avctx->internal->pkt->pts,
 +                                         avctx->internal->pkt->dts, 0);
 +                if (index < 0) {
 +                    av_log(avctx, AV_LOG_WARNING,
 +                           "CrystalHD: Failed to parse h.264 packet to "
 +                           "detect interlacing.\n");
 +                } else if (index != len) {
 +                    av_log(avctx, AV_LOG_WARNING,
 +                           "CrystalHD: Failed to parse h.264 packet "
 +                           "completely. Interlaced frames may be "
 +                           "incorrectly detected.\n");
 +                } else {
 +                    av_log(avctx, AV_LOG_VERBOSE,
 +                           "CrystalHD: parser picture type %d\n",
 +                           h->picture_structure);
 +                    pic_type = h->picture_structure;
 +                }
 +            } else {
 +                av_log(avctx, AV_LOG_WARNING,
 +                       "CrystalHD: mp4toannexb filter failed to filter "
 +                       "packet. Interlaced frames may be incorrectly "
 +                       "detected.\n");
 +            }
 +        }
 +
 +        if (len < tx_free - 1024) {
 +            /*
 +             * Despite being notionally opaque, either libcrystalhd or
 +             * the hardware itself will mangle pts values that are too
 +             * small or too large. The docs claim it should be in units
 +             * of 100ns. Given that we're nominally dealing with a black
 +             * box on both sides, any transform we do has no guarantee of
 +             * avoiding mangling, so we need to build a mapping to values
 +             * we know will not be mangled.
 +             */
 +            uint64_t pts = opaque_list_push(priv, avctx->internal->pkt->pts, pic_type);
 +            if (!pts) {
 +                if (free_data) {
 +                    av_freep(&in_data);
 +                }
 +                return AVERROR(ENOMEM);
 +            }
 +            av_log(priv->avctx, AV_LOG_VERBOSE,
 +                   "input \"pts\": %"PRIu64"\n", pts);
 +            ret = DtsProcInput(dev, in_data, len, pts, 0);
 +            if (free_data) {
 +                av_freep(&in_data);
 +            }
 +            if (ret == BC_STS_BUSY) {
 +                av_log(avctx, AV_LOG_WARNING,
 +                       "CrystalHD: ProcInput returned busy\n");
 +                usleep(BASE_WAIT);
 +                return AVERROR(EBUSY);
 +            } else if (ret != BC_STS_SUCCESS) {
 +                av_log(avctx, AV_LOG_ERROR,
 +                       "CrystalHD: ProcInput failed: %u\n", ret);
 +                return -1;
 +            }
 +            avctx->has_b_frames++;
 +        } else {
 +            av_log(avctx, AV_LOG_WARNING, "CrystalHD: Input buffer full\n");
 +            len = 0; // We didn't consume any bytes.
 +        }
 +    } else {
 +        av_log(avctx, AV_LOG_INFO, "CrystalHD: No more input data\n");
 +    }
 +
 +    if (priv->skip_next_output) {
 +        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Skipping next output.\n");
 +        priv->skip_next_output = 0;
 +        avctx->has_b_frames--;
 +        return len;
 +    }
 +
 +    ret = DtsGetDriverStatus(dev, &decoder_status);
 +    if (ret != BC_STS_SUCCESS) {
 +        av_log(avctx, AV_LOG_ERROR, "CrystalHD: GetDriverStatus failed\n");
 +        return -1;
 +    }
 +
 +    /*
 +     * No frames ready. Don't try to extract.
 +     *
 +     * Empirical testing shows that ReadyListCount can be a damn lie,
 +     * and ProcOut still fails when count > 0. The same testing showed
 +     * that two more iterations were needed before ProcOutput would
 +     * succeed.
 +     */
 +    if (priv->output_ready < 2) {
 +        if (decoder_status.ReadyListCount != 0)
 +            priv->output_ready++;
 +        usleep(BASE_WAIT);
 +        av_log(avctx, AV_LOG_INFO, "CrystalHD: Filling pipeline.\n");
 +        return len;
 +    } else if (decoder_status.ReadyListCount == 0) {
 +        /*
 +         * After the pipeline is established, if we encounter a lack of frames,
 +         * that probably means we're not giving the hardware enough time to
 +         * decode them, so start increasing the wait time at the end of a
 +         * decode call.
 +         */
 +        usleep(BASE_WAIT);
 +        priv->decode_wait += WAIT_UNIT;
 +        av_log(avctx, AV_LOG_INFO, "CrystalHD: No frames ready. Returning\n");
 +        return len;
 +    }
 +
 +    do {
 +        rec_ret = receive_frame(avctx, data, got_frame);
 +        if (rec_ret == RET_OK && *got_frame == 0) {
 +            /*
 +             * This case is for when the encoded fields are stored
 +             * separately and we get a separate avpkt for each one. To keep
 +             * the pipeline stable, we should return nothing and wait for
 +             * the next time round to grab the second field.
 +             * H.264 PAFF is an example of this.
 +             */
 +            av_log(avctx, AV_LOG_VERBOSE, "Returning after first field.\n");
 +            avctx->has_b_frames--;
 +        } else if (rec_ret == RET_COPY_NEXT_FIELD) {
 +            /*
 +             * This case is for when the encoded fields are stored in a
 +             * single avpkt but the hardware returns them separately. Unless
 +             * we grab the second field before returning, we'll slip another
 +             * frame in the pipeline and if that happens a lot, we're sunk.
 +             * So we have to get that second field now.
 +             * Interlaced mpeg2 and vc1 are examples of this.
 +             */
 +            av_log(avctx, AV_LOG_VERBOSE, "Trying to get second field.\n");
 +            while (1) {
 +                usleep(priv->decode_wait);
 +                ret = DtsGetDriverStatus(dev, &decoder_status);
 +                if (ret == BC_STS_SUCCESS &&
 +                    decoder_status.ReadyListCount > 0) {
 +                    rec_ret = receive_frame(avctx, data, got_frame);
 +                    if ((rec_ret == RET_OK && *got_frame > 0) ||
 +                        rec_ret == RET_ERROR)
 +                        break;
 +                }
 +            }
 +            av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Got second field.\n");
 +        } else if (rec_ret == RET_SKIP_NEXT_COPY) {
 +            /*
 +             * Two input packets got turned into a field pair. Gawd.
 +             */
 +            av_log(avctx, AV_LOG_VERBOSE,
 +                   "Don't output on next decode call.\n");
 +            priv->skip_next_output = 1;
 +        }
 +        /*
 +         * If rec_ret == RET_COPY_AGAIN, that means that either we just handled
 +         * a FMT_CHANGE event and need to go around again for the actual frame,
 +         * we got a busy status and need to try again, or we're dealing with
 +         * packed b-frames, where the hardware strangely returns the packed
 +         * p-frame twice. We choose to keep the second copy as it carries the
 +         * valid pts.
 +         */
 +    } while (rec_ret == RET_COPY_AGAIN);
 +    usleep(priv->decode_wait);
 +    return len;
 +}
 +
 +
 +#if CONFIG_H264_CRYSTALHD_DECODER
 +static AVClass h264_class = {
 +    "h264_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_h264_crystalhd_decoder = {
 +    .name           = "h264_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &h264_class,
 +};
 +#endif
 +
 +#if CONFIG_MPEG2_CRYSTALHD_DECODER
 +static AVClass mpeg2_class = {
 +    "mpeg2_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_mpeg2_crystalhd_decoder = {
 +    .name           = "mpeg2_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-2 Video (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG2VIDEO,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &mpeg2_class,
 +};
 +#endif
 +
 +#if CONFIG_MPEG4_CRYSTALHD_DECODER
 +static AVClass mpeg4_class = {
 +    "mpeg4_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_mpeg4_crystalhd_decoder = {
 +    .name           = "mpeg4_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG4,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &mpeg4_class,
 +};
 +#endif
 +
 +#if CONFIG_MSMPEG4_CRYSTALHD_DECODER
 +static AVClass msmpeg4_class = {
 +    "msmpeg4_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_msmpeg4_crystalhd_decoder = {
 +    .name           = "msmpeg4_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 Part 2 Microsoft variant version 3 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MSMPEG4V3,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &msmpeg4_class,
 +};
 +#endif
 +
 +#if CONFIG_VC1_CRYSTALHD_DECODER
 +static AVClass vc1_class = {
 +    "vc1_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_vc1_crystalhd_decoder = {
 +    .name           = "vc1_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("SMPTE VC-1 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_VC1,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &vc1_class,
 +};
 +#endif
 +
 +#if CONFIG_WMV3_CRYSTALHD_DECODER
 +static AVClass wmv3_class = {
 +    "wmv3_crystalhd",
 +    av_default_item_name,
 +    options,
 +    LIBAVUTIL_VERSION_INT,
 +};
 +
 +AVCodec ff_wmv3_crystalhd_decoder = {
 +    .name           = "wmv3_crystalhd",
 +    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 (CrystalHD acceleration)"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_WMV3,
 +    .priv_data_size = sizeof(CHDContext),
 +    .init           = init,
 +    .close          = uninit,
 +    .decode         = decode,
 +    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
 +    .flush          = flush,
 +    .pix_fmts       = (const enum AVPixelFormat[]){AV_PIX_FMT_YUYV422, AV_PIX_FMT_NONE},
 +    .priv_class     = &wmv3_class,
 +};
 +#endif
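
As an aside to the crystalhd.c comments above, the fake-timestamp mapping they describe can be summarised in a short sketch. This is illustrative only and not part of the merge: it assumes the opaque_list_push()/opaque_list_pop() helpers and the CHDContext fields defined in the file above, and the function name crystalhd_pts_roundtrip_sketch() is made up for this note.

    /* Illustrative sketch (not part of this commit): map a packet's pts to a
     * fake timestamp the hardware will not mangle, then recover it on output. */
    static void crystalhd_pts_roundtrip_sketch(CHDContext *priv, AVPacket *avpkt,
                                               BC_DTS_PROC_OUT *output)
    {
        /* Input side: remember the real pts under a monotonically increasing key. */
        uint64_t fake_pts = opaque_list_push(priv, avpkt->pts, 0 /* pic_type unknown here */);
        if (fake_pts)
            DtsProcInput(priv->dev, avpkt->data, avpkt->size, fake_pts, 0);

        /* Output side: the hardware echoes the fake timestamp back in PicInfo. */
        if (output->PicInfo.timeStamp) {
            OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
            if (node) {
                int64_t real_pts = node->reordered_opaque; /* original pts restored */
                av_free(node);
                (void)real_pts; /* would be attached to the decoded frame */
            }
        }
    }
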
@@@ -20,9 -20,7 +20,9 @@@
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
- #include "h264.h"
 +#include "libavutil/avassert.h"
 +
+ #include "h264dec.h"
  #include "h264data.h"
  #include "mpegutils.h"
  
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
  
  #include "internal.h"
  #include "avcodec.h"
- #include "h264.h"
+ #include "h264dec.h"
  #include "mpegutils.h"
 +#include "libavutil/avassert.h"
  
 -#include <assert.h>
  
  static av_always_inline int fetch_diagonal_mv(const H264Context *h, H264SliceContext *sl,
                                                const int16_t **C,
Simple merge
Simple merge
Simple merge
Simple merge
  
  #include <inttypes.h>
  
 +#include "libavutil/avassert.h"
  #include "internal.h"
  #include "avcodec.h"
- #include "h264.h"
+ #include "h264dec.h"
  #include "golomb.h"
  #include "mpegutils.h"
  
Simple merge
Simple merge
Simple merge
Simple merge
index 0000000,6eb2da8..8e07e7e
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,982 +1,1292 @@@
 - * This file is part of Libav.
+ /*
+  * H.26L/H.264/AVC/JVT/14496-10/... decoder
+  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+  *
 - * Libav is free software; you can redistribute it and/or
++ * This file is part of FFmpeg.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * License along with Libav; if not, write to the Free Software
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 -#include <assert.h>
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ /**
+  * @file
+  * H.264 / AVC / MPEG-4 part10 codec.
+  * @author Michael Niedermayer <michaelni@gmx.at>
+  */
++#define UNCHECKED_BITSTREAM_READER 1
++
++#include "libavutil/avassert.h"
+ #include "libavutil/display.h"
+ #include "libavutil/imgutils.h"
+ #include "libavutil/opt.h"
+ #include "libavutil/stereo3d.h"
+ #include "libavutil/timer.h"
+ #include "internal.h"
+ #include "bytestream.h"
+ #include "cabac.h"
+ #include "cabac_functions.h"
+ #include "error_resilience.h"
+ #include "avcodec.h"
+ #include "h264dec.h"
+ #include "h2645_parse.h"
+ #include "h264data.h"
+ #include "h264chroma.h"
+ #include "h264_mvpred.h"
+ #include "golomb.h"
+ #include "mathops.h"
+ #include "me_cmp.h"
+ #include "mpegutils.h"
+ #include "profiles.h"
+ #include "rectangle.h"
+ #include "thread.h"
++#include "vdpau_compat.h"
 -    assert(ref >= 0);
++static int h264_decode_end(AVCodecContext *avctx);
+ const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
++int avpriv_h264_has_num_reorder_frames(AVCodecContext *avctx)
++{
++    H264Context *h = avctx->priv_data;
++    return h && h->ps.sps ? h->ps.sps->num_reorder_frames : 0;
++}
++
+ static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
+                               int (*mv)[2][4][2],
+                               int mb_x, int mb_y, int mb_intra, int mb_skipped)
+ {
+     H264Context *h = opaque;
+     H264SliceContext *sl = &h->slice_ctx[0];
+     sl->mb_x = mb_x;
+     sl->mb_y = mb_y;
+     sl->mb_xy = mb_x + mb_y * h->mb_stride;
+     memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
 -    assert(!FRAME_MBAFF(h));
++    av_assert1(ref >= 0);
+     /* FIXME: It is possible albeit uncommon that slice references
+      * differ between slices. We take the easy approach and ignore
+      * it for now. If this turns out to have any relevance in
+      * practice then correct remapping should be added. */
+     if (ref >= sl->ref_count[0])
+         ref = 0;
++    if (!sl->ref_list[0][ref].data[0]) {
++        av_log(h->avctx, AV_LOG_DEBUG, "Reference not available for error concealing\n");
++        ref = 0;
++    }
++    if ((sl->ref_list[0][ref].reference&3) != 3) {
++        av_log(h->avctx, AV_LOG_DEBUG, "Reference invalid\n");
++        return;
++    }
+     fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
+                    2, 2, 2, ref, 1);
+     fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
+     fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
+                    pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
 -    const int row_mb_num = h->mb_stride * 2 * h->nb_slice_ctx;
++    sl->mb_mbaff =
++    sl->mb_field_decoding_flag = 0;
+     ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
+ }
+ void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
+                              int y, int height)
+ {
+     AVCodecContext *avctx = h->avctx;
+     const AVFrame   *src  = h->cur_pic.f;
+     const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
+     int vshift = desc->log2_chroma_h;
+     const int field_pic = h->picture_structure != PICT_FRAME;
+     if (field_pic) {
+         height <<= 1;
+         y      <<= 1;
+     }
+     height = FFMIN(height, avctx->height - y);
+     if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
+         return;
+     if (avctx->draw_horiz_band) {
+         int offset[AV_NUM_DATA_POINTERS];
+         int i;
+         offset[0] = y * src->linesize[0];
+         offset[1] =
+         offset[2] = (y >> vshift) * src->linesize[1];
+         for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
+             offset[i] = 0;
+         emms_c();
+         avctx->draw_horiz_band(avctx, src, offset,
+                                y, h->picture_structure, height);
+     }
+ }
+ void ff_h264_free_tables(H264Context *h)
+ {
+     int i;
+     av_freep(&h->intra4x4_pred_mode);
+     av_freep(&h->chroma_pred_mode_table);
+     av_freep(&h->cbp_table);
+     av_freep(&h->mvd_table[0]);
+     av_freep(&h->mvd_table[1]);
+     av_freep(&h->direct_table);
+     av_freep(&h->non_zero_count);
+     av_freep(&h->slice_table_base);
+     h->slice_table = NULL;
+     av_freep(&h->list_counts);
+     av_freep(&h->mb2b_xy);
+     av_freep(&h->mb2br_xy);
+     av_buffer_pool_uninit(&h->qscale_table_pool);
+     av_buffer_pool_uninit(&h->mb_type_pool);
+     av_buffer_pool_uninit(&h->motion_val_pool);
+     av_buffer_pool_uninit(&h->ref_index_pool);
+     for (i = 0; i < h->nb_slice_ctx; i++) {
+         H264SliceContext *sl = &h->slice_ctx[i];
+         av_freep(&sl->dc_val_base);
+         av_freep(&sl->er.mb_index2xy);
+         av_freep(&sl->er.error_status_table);
+         av_freep(&sl->er.er_temp_buffer);
+         av_freep(&sl->bipred_scratchpad);
+         av_freep(&sl->edge_emu_buffer);
+         av_freep(&sl->top_borders[0]);
+         av_freep(&sl->top_borders[1]);
+         sl->bipred_scratchpad_allocated = 0;
+         sl->edge_emu_buffer_allocated   = 0;
+         sl->top_borders_allocated[0]    = 0;
+         sl->top_borders_allocated[1]    = 0;
+     }
+ }
+ int ff_h264_alloc_tables(H264Context *h)
+ {
+     const int big_mb_num = h->mb_stride * (h->mb_height + 1);
 -    FF_ALLOCZ_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
 -                      row_mb_num * 8 * sizeof(uint8_t), fail)
++    const int row_mb_num = 2*h->mb_stride*FFMAX(h->nb_slice_ctx, 1);
+     int x, y;
 -    FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0],
 -                      16 * row_mb_num * sizeof(uint8_t), fail);
 -    FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1],
 -                      16 * row_mb_num * sizeof(uint8_t), fail);
++    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
++                      row_mb_num, 8 * sizeof(uint8_t), fail)
+     h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;
+     FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
+                       big_mb_num * 48 * sizeof(uint8_t), fail)
+     FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
+                       (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
+     FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
+                       big_mb_num * sizeof(uint16_t), fail)
+     FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
+                       big_mb_num * sizeof(uint8_t), fail)
 -    for (i = 0; i < MAX_SPS_COUNT; i++)
 -        av_buffer_unref(&h->ps.sps_list[i]);
 -
 -    for (i = 0; i < MAX_PPS_COUNT; i++)
 -        av_buffer_unref(&h->ps.pps_list[i]);
++    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[0],
++                      row_mb_num, 16 * sizeof(uint8_t), fail);
++    FF_ALLOCZ_ARRAY_OR_GOTO(h->avctx, h->mvd_table[1],
++                      row_mb_num, 16 * sizeof(uint8_t), fail);
+     h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
+     h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];
+     FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
+                       4 * big_mb_num * sizeof(uint8_t), fail);
+     FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
+                       big_mb_num * sizeof(uint8_t), fail)
+     memset(h->slice_table_base, -1,
+            (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
+     h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;
+     FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
+                       big_mb_num * sizeof(uint32_t), fail);
+     FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
+                       big_mb_num * sizeof(uint32_t), fail);
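++    /* Precompute per-macroblock lookups: mb2b_xy maps a macroblock index to
++     * its top-left 4x4 block position, mb2br_xy to its slot in the
++     * row-limited tables (mvd, intra4x4 prediction modes). */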
+     for (y = 0; y < h->mb_height; y++)
+         for (x = 0; x < h->mb_width; x++) {
+             const int mb_xy = x + y * h->mb_stride;
+             const int b_xy  = 4 * x + 4 * y * h->b_stride;
+             h->mb2b_xy[mb_xy]  = b_xy;
+             h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
+         }
+     return 0;
+ fail:
+     ff_h264_free_tables(h);
+     return AVERROR(ENOMEM);
+ }
+ /**
+  * Init context
+  * Allocate buffers which are not shared amongst multiple threads.
+  */
+ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
+ {
+     ERContext *er = &sl->er;
+     int mb_array_size = h->mb_height * h->mb_stride;
+     int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
+     int c_size  = h->mb_stride * (h->mb_height + 1);
+     int yc_size = y_size + 2   * c_size;
+     int x, y, i;
+     sl->ref_cache[0][scan8[5]  + 1] =
+     sl->ref_cache[0][scan8[7]  + 1] =
+     sl->ref_cache[0][scan8[13] + 1] =
+     sl->ref_cache[1][scan8[5]  + 1] =
+     sl->ref_cache[1][scan8[7]  + 1] =
+     sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;
++    if (sl != h->slice_ctx) {
++        memset(er, 0, sizeof(*er));
++    } else
+     if (CONFIG_ERROR_RESILIENCE) {
++
+         /* init ER */
+         er->avctx          = h->avctx;
+         er->decode_mb      = h264_er_decode_mb;
+         er->opaque         = h;
+         er->quarter_sample = 1;
+         er->mb_num      = h->mb_num;
+         er->mb_width    = h->mb_width;
+         er->mb_height   = h->mb_height;
+         er->mb_stride   = h->mb_stride;
+         er->b8_stride   = h->mb_width * 2 + 1;
+         // error resilience code looks cleaner with this
+         FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
+                           (h->mb_num + 1) * sizeof(int), fail);
+         for (y = 0; y < h->mb_height; y++)
+             for (x = 0; x < h->mb_width; x++)
+                 er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;
+         er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
+                                                       h->mb_stride + h->mb_width;
+         FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
+                           mb_array_size * sizeof(uint8_t), fail);
+         FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
+                          h->mb_height * h->mb_stride, fail);
+         FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
+                           yc_size * sizeof(int16_t), fail);
+         er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
+         er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
+         er->dc_val[2] = er->dc_val[1] + c_size;
+         for (i = 0; i < yc_size; i++)
+             sl->dc_val_base[i] = 1024;
+     }
+     return 0;
+ fail:
+     return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
+ }
+ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
+ {
+     int i;
+     h->avctx                 = avctx;
++    h->backup_width          = -1;
++    h->backup_height         = -1;
++    h->backup_pix_fmt        = AV_PIX_FMT_NONE;
++    h->cur_chroma_format_idc = -1;
+     h->picture_structure     = PICT_FRAME;
+     h->workaround_bugs       = avctx->workaround_bugs;
+     h->flags                 = avctx->flags;
+     h->poc.prev_poc_msb      = 1 << 16;
+     h->recovery_frame        = -1;
+     h->frame_recovered       = 0;
++    h->poc.prev_frame_num    = -1;
++    h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
++    h->sei.unregistered.x264_build = -1;
+     h->next_outputed_poc = INT_MIN;
+     for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
+         h->last_pocs[i] = INT_MIN;
+     ff_h264_sei_uninit(&h->sei);
+     avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
+     h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
+     h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
+     if (!h->slice_ctx) {
+         h->nb_slice_ctx = 0;
+         return AVERROR(ENOMEM);
+     }
+     for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
+         h->DPB[i].f = av_frame_alloc();
+         if (!h->DPB[i].f)
+             return AVERROR(ENOMEM);
+     }
+     h->cur_pic.f = av_frame_alloc();
+     if (!h->cur_pic.f)
+         return AVERROR(ENOMEM);
++    h->last_pic_for_ec.f = av_frame_alloc();
++    if (!h->last_pic_for_ec.f)
++        return AVERROR(ENOMEM);
++
+     for (i = 0; i < h->nb_slice_ctx; i++)
+         h->slice_ctx[i].h264 = h;
+     return 0;
+ }
+ static av_cold int h264_decode_end(AVCodecContext *avctx)
+ {
+     H264Context *h = avctx->priv_data;
+     int i;
++    ff_h264_remove_all_refs(h);
+     ff_h264_free_tables(h);
+     for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
+         ff_h264_unref_picture(h, &h->DPB[i]);
+         av_frame_free(&h->DPB[i].f);
+     }
++    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
+     h->cur_pic_ptr = NULL;
+     av_freep(&h->slice_ctx);
+     h->nb_slice_ctx = 0;
 -        if (avctx->ticks_per_frame == 1)
 -            h->avctx->framerate.num *= 2;
++    ff_h264_sei_uninit(&h->sei);
++    ff_h264_ps_uninit(&h->ps);
+     ff_h2645_packet_uninit(&h->pkt);
+     ff_h264_unref_picture(h, &h->cur_pic);
+     av_frame_free(&h->cur_pic.f);
++    ff_h264_unref_picture(h, &h->last_pic_for_ec);
++    av_frame_free(&h->last_pic_for_ec.f);
+     return 0;
+ }
+ static AVOnce h264_vlc_init = AV_ONCE_INIT;
+ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
+ {
+     H264Context *h = avctx->priv_data;
+     int ret;
+     ret = h264_init_context(avctx, h);
+     if (ret < 0)
+         return ret;
+     ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
+     if (ret != 0) {
+         av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
+         return AVERROR_UNKNOWN;
+     }
+     if (avctx->codec_id == AV_CODEC_ID_H264) {
 -       ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
 -                                      &h->ps, &h->is_avc, &h->nal_length_size,
 -                                      avctx->err_recognition, avctx);
 -       if (ret < 0) {
 -           h264_decode_end(avctx);
 -           return ret;
 -       }
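++        /* Containers that report one tick per frame: double the time base
++         * (guarding against overflow) so that ticks_per_frame can be set to
++         * 2 below without changing the effective frame rate. */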
++        if (avctx->ticks_per_frame == 1) {
++            if(h->avctx->time_base.den < INT_MAX/2) {
++                h->avctx->time_base.den *= 2;
++            } else
++                h->avctx->time_base.num /= 2;
++        }
+         avctx->ticks_per_frame = 2;
+     }
+     if (avctx->extradata_size > 0 && avctx->extradata) {
 -    if (h->enable_er) {
++        ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
++                                       &h->ps, &h->is_avc, &h->nal_length_size,
++                                       avctx->err_recognition, avctx);
++        if (ret < 0) {
++            h264_decode_end(avctx);
++            return ret;
++        }
+     }
+     if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
+         h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
+         h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
+     }
+     avctx->internal->allocate_progress = 1;
 -               "Error resilience is enabled. It is unsafe and unsupported and may crash. "
++    ff_h264_flush_change(h);
++
++    if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
++        h->enable_er = 0;
++
++    if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
+         av_log(avctx, AV_LOG_WARNING,
 -    int invalid = 0, cnt = 0;
++               "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
+                "Use it at your own risk\n");
+     }
+     return 0;
+ }
++#if HAVE_THREADS
+ static int decode_init_thread_copy(AVCodecContext *avctx)
+ {
+     H264Context *h = avctx->priv_data;
+     int ret;
+     if (!avctx->internal->is_copy)
+         return 0;
+     memset(h, 0, sizeof(*h));
+     ret = h264_init_context(avctx, h);
+     if (ret < 0)
+         return ret;
+     h->context_initialized = 0;
+     return 0;
+ }
++#endif
+ /**
+  * Run setup operations that must be run after slice header decoding.
+  * This includes finding the next displayed frame.
+  *
+  * @param h h264 master context
+  * @param setup_finished enough NALs have been read that we can call
+  * ff_thread_finish_setup()
+  */
+ static void decode_postinit(H264Context *h, int setup_finished)
+ {
+     const SPS *sps = h->ps.sps;
+     H264Picture *out = h->cur_pic_ptr;
+     H264Picture *cur = h->cur_pic_ptr;
+     int i, pics, out_of_order, out_idx;
 -        return;
+     if (h->next_output_pic)
+         return;
+     if (cur->field_poc[0] == INT_MAX || cur->field_poc[1] == INT_MAX) {
+         /* FIXME: if we have two PAFF fields in one packet, we can't start
+          * the next thread here. If we have one field per packet, we can.
+          * The check in decode_nal_units() is not good enough to find this
+          * yet, so we assume the worst for now. */
+         // if (setup_finished)
+         //    ff_thread_finish_setup(h->avctx);
 -        h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
++        if (cur->field_poc[0] == INT_MAX && cur->field_poc[1] == INT_MAX)
++            return;
++        if (h->avctx->hwaccel || h->missing_fields <=1)
++            return;
+     }
++    cur->mmco_reset = h->mmco_reset;
++    h->mmco_reset = 0;
++
+     // FIXME do something with unavailable reference frames
+     /* Sort B-frames into display order */
+     if (sps->bitstream_restriction_flag ||
 -    assert(pics <= MAX_DELAYED_PIC_COUNT);
++        h->avctx->strict_std_compliance >= FF_COMPLIANCE_STRICT) {
+         h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
+     }
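++    /* Track the most recently seen POCs in h->last_pocs and estimate how far
++     * out of display order the current picture is from where its POC falls
++     * within that window. */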
++    for (i = 0; 1; i++) {
++        if(i == MAX_DELAYED_PIC_COUNT || cur->poc < h->last_pocs[i]){
++            if(i)
++                h->last_pocs[i-1] = cur->poc;
++            break;
++        } else if(i) {
++            h->last_pocs[i-1]= h->last_pocs[i];
++        }
++    }
++    out_of_order = MAX_DELAYED_PIC_COUNT - i;
++    if(   cur->f->pict_type == AV_PICTURE_TYPE_B
++       || (h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > INT_MIN && h->last_pocs[MAX_DELAYED_PIC_COUNT-1] - h->last_pocs[MAX_DELAYED_PIC_COUNT-2] > 2))
++        out_of_order = FFMAX(out_of_order, 1);
++    if (out_of_order == MAX_DELAYED_PIC_COUNT) {
++        av_log(h->avctx, AV_LOG_VERBOSE, "Invalid POC %d<%d\n", cur->poc, h->last_pocs[0]);
++        for (i = 1; i < MAX_DELAYED_PIC_COUNT; i++)
++            h->last_pocs[i] = INT_MIN;
++        h->last_pocs[0] = cur->poc;
++        cur->mmco_reset = 1;
++    } else if(h->avctx->has_b_frames < out_of_order && !sps->bitstream_restriction_flag){
++        av_log(h->avctx, AV_LOG_INFO, "Increasing reorder buffer to %d\n", out_of_order);
++        h->avctx->has_b_frames = out_of_order;
++    }
++
+     pics = 0;
+     while (h->delayed_pic[pics])
+         pics++;
 -    /* Frame reordering. This code takes pictures from coding order and sorts
 -     * them by their incremental POC value into display order. It supports POC
 -     * gaps, MMCO reset codes and random resets.
 -     * A "display group" can start either with a IDR frame (f.key_frame = 1),
 -     * and/or can be closed down with a MMCO reset code. In sequences where
 -     * there is no delay, we can't detect that (since the frame was already
 -     * output to the user), so we also set h->mmco_reset to detect the MMCO
 -     * reset code.
 -     * FIXME: if we detect insufficient delays (as per h->avctx->has_b_frames),
 -     * we increase the delay between input and output. All frames affected by
 -     * the lag (e.g. those that should have been output before another frame
 -     * that we already returned to the user) will be dropped. This is a bug
 -     * that we will fix later. */
 -    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++) {
 -        cnt     += out->poc < h->last_pocs[i];
 -        invalid += out->poc == INT_MIN;
 -    }
 -    if (!h->mmco_reset && !cur->f->key_frame &&
 -        cnt + invalid == MAX_DELAYED_PIC_COUNT && cnt > 0) {
 -        h->mmco_reset = 2;
 -        if (pics > 1)
 -            h->delayed_pic[pics - 2]->mmco_reset = 2;
 -    }
 -    if (h->mmco_reset || cur->f->key_frame) {
 -        for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
 -            h->last_pocs[i] = INT_MIN;
 -        cnt     = 0;
 -        invalid = MAX_DELAYED_PIC_COUNT;
 -    }
++    av_assert0(pics <= MAX_DELAYED_PIC_COUNT);
+     h->delayed_pic[pics++] = cur;
+     if (cur->reference == 0)
+         cur->reference = DELAYED_PIC_REF;
 -    for (i = 1; i < MAX_DELAYED_PIC_COUNT &&
 -                h->delayed_pic[i] &&
 -                !h->delayed_pic[i - 1]->mmco_reset &&
 -                !h->delayed_pic[i]->f->key_frame;
+     out     = h->delayed_pic[0];
+     out_idx = 0;
 -        (h->delayed_pic[0]->f->key_frame || h->mmco_reset))
++    for (i = 1; h->delayed_pic[i] &&
++                !h->delayed_pic[i]->f->key_frame &&
++                !h->delayed_pic[i]->mmco_reset;
+          i++)
+         if (h->delayed_pic[i]->poc < out->poc) {
+             out     = h->delayed_pic[i];
+             out_idx = i;
+         }
+     if (h->avctx->has_b_frames == 0 &&
 -    out_of_order = !out->f->key_frame && !h->mmco_reset &&
 -                   (out->poc < h->next_outputed_poc);
 -
 -    if (sps->bitstream_restriction_flag &&
 -        h->avctx->has_b_frames >= sps->num_reorder_frames) {
 -    } else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
 -               h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
 -        if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
 -            h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, cnt);
 -        }
 -    } else if (!h->avctx->has_b_frames &&
 -               ((h->next_outputed_poc != INT_MIN &&
 -                 out->poc > h->next_outputed_poc + 2) ||
 -                cur->f->pict_type == AV_PICTURE_TYPE_B)) {
 -        h->avctx->has_b_frames++;
 -    }
++        (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset))
+         h->next_outputed_poc = INT_MIN;
 -    if (pics > h->avctx->has_b_frames) {
++    out_of_order = out->poc < h->next_outputed_poc;
 -    memmove(h->last_pocs, &h->last_pocs[1],
 -            sizeof(*h->last_pocs) * (MAX_DELAYED_PIC_COUNT - 1));
 -    h->last_pocs[MAX_DELAYED_PIC_COUNT - 1] = cur->poc;
++    if (out_of_order || pics > h->avctx->has_b_frames) {
+         out->reference &= ~DELAYED_PIC_REF;
+         for (i = out_idx; h->delayed_pic[i]; i++)
+             h->delayed_pic[i] = h->delayed_pic[i + 1];
+     }
 -        if (out->mmco_reset) {
 -            if (out_idx > 0) {
 -                h->next_outputed_poc                    = out->poc;
 -                h->delayed_pic[out_idx - 1]->mmco_reset = out->mmco_reset;
 -            } else {
 -                h->next_outputed_poc = INT_MIN;
 -            }
 -        } else {
 -            if (out_idx == 0 && pics > 1 && h->delayed_pic[0]->f->key_frame) {
 -                h->next_outputed_poc = INT_MIN;
 -            } else {
 -                h->next_outputed_poc = out->poc;
 -            }
 -        }
 -        h->mmco_reset = 0;
+     if (!out_of_order && pics > h->avctx->has_b_frames) {
+         h->next_output_pic = out;
 -        av_log(h->avctx, AV_LOG_DEBUG, "no picture\n");
++        if (out_idx == 0 && h->delayed_pic[0] && (h->delayed_pic[0]->f->key_frame || h->delayed_pic[0]->mmco_reset)) {
++            h->next_outputed_poc = INT_MIN;
++        } else
++            h->next_outputed_poc = out->poc;
+     } else {
 -    h->poc.prev_frame_num_offset =
 -    h->poc.prev_poc_msb          =
++        av_log(h->avctx, AV_LOG_DEBUG, "no picture %s\n", out_of_order ? "ooo" : "");
+     }
+     if (h->next_output_pic) {
+         if (h->next_output_pic->recovered) {
+             // We have reached a recovery point and all frames after it in
+             // display order are "recovered".
+             h->frame_recovered |= FRAME_RECOVERED_SEI;
+         }
+         h->next_output_pic->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_SEI);
+     }
+     if (setup_finished && !h->avctx->hwaccel) {
+         ff_thread_finish_setup(h->avctx);
+         if (h->avctx->active_thread_type & FF_THREAD_FRAME)
+             h->setup_finished = 1;
+     }
+ }
+ /**
+  * instantaneous decoder refresh.
+  * Instantaneous Decoder Refresh: drop all reference pictures and reset POC state.
+ static void idr(H264Context *h)
+ {
++    int i;
+     ff_h264_remove_all_refs(h);
+     h->poc.prev_frame_num        =
 -    int i;
 -    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
 -        h->last_pocs[i] = INT_MIN;
++    h->poc.prev_frame_num_offset = 0;
++    h->poc.prev_poc_msb          = 1<<16;
+     h->poc.prev_poc_lsb          = 0;
++    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
++        h->last_pocs[i] = INT_MIN;
+ }
+ /* forget old pics after a seek */
+ void ff_h264_flush_change(H264Context *h)
+ {
 -    if (h->cur_pic_ptr)
++    int i, j;
++
+     h->next_outputed_poc = INT_MIN;
+     h->prev_interlaced_frame = 1;
+     idr(h);
 -            init_get_bits(&gb, nal->data + 1, (nal->size - 1) * 8);
 -            if (!get_ue_golomb(&gb))
++
++    h->poc.prev_frame_num = -1;
++    if (h->cur_pic_ptr) {
+         h->cur_pic_ptr->reference = 0;
++        for (j=i=0; h->delayed_pic[i]; i++)
++            if (h->delayed_pic[i] != h->cur_pic_ptr)
++                h->delayed_pic[j++] = h->delayed_pic[i];
++        h->delayed_pic[j] = NULL;
++    }
++    ff_h264_unref_picture(h, &h->last_pic_for_ec);
++
+     h->first_field = 0;
+     ff_h264_sei_uninit(&h->sei);
+     h->recovery_frame = -1;
+     h->frame_recovered = 0;
++    h->current_slice = 0;
++    h->mmco_reset = 1;
+ }
+ /* forget old pics after a seek */
+ static void flush_dpb(AVCodecContext *avctx)
+ {
+     H264Context *h = avctx->priv_data;
+     int i;
+     memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
+     ff_h264_flush_change(h);
+     for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
+         ff_h264_unref_picture(h, &h->DPB[i]);
+     h->cur_pic_ptr = NULL;
+     ff_h264_unref_picture(h, &h->cur_pic);
+     h->mb_y = 0;
+     ff_h264_free_tables(h);
+     h->context_initialized = 0;
+ }
++#if FF_API_CAP_VDPAU
++static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
++#endif
++
+ static int get_last_needed_nal(H264Context *h)
+ {
+     int nals_needed = 0;
++    int first_slice = 0;
+     int i;
++    int ret;
+     for (i = 0; i < h->pkt.nb_nals; i++) {
+         H2645NAL *nal = &h->pkt.nals[i];
+         GetBitContext gb;
+         /* Packets can sometimes contain multiple PPS/SPS, e.g. two PAFF
+          * field pictures in one packet, or a demuxer which splits NALs
+          * strangely. If so, when frame threading, we can't start the next
+          * thread until we have read all of them. */
+         switch (nal->type) {
+         case NAL_SPS:
+         case NAL_PPS:
+             nals_needed = i;
+             break;
+         case NAL_DPA:
+         case NAL_IDR_SLICE:
+         case NAL_SLICE:
 -            idr(h); // FIXME ensure we don't lose some frames if there is reordering
++            ret = init_get_bits8(&gb, nal->data + 1, (nal->size - 1));
++            if (ret < 0)
++                return ret;
++            if (!get_ue_golomb_long(&gb) ||  // first_mb_in_slice
++                !first_slice ||
++                first_slice != nal->type)
+                 nals_needed = i;
++            if (!first_slice)
++                first_slice = nal->type;
+         }
+     }
+     return nals_needed;
+ }
++static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
++{
++    av_log(logctx, AV_LOG_DEBUG, "Green Metadata Info SEI message\n");
++    av_log(logctx, AV_LOG_DEBUG, "  green_metadata_type: %d\n", gm->green_metadata_type);
++
++    if (gm->green_metadata_type == 0) {
++        av_log(logctx, AV_LOG_DEBUG, "  green_metadata_period_type: %d\n", gm->period_type);
++
++        if (gm->period_type == 2)
++            av_log(logctx, AV_LOG_DEBUG, "  green_metadata_num_seconds: %d\n", gm->num_seconds);
++        else if (gm->period_type == 3)
++            av_log(logctx, AV_LOG_DEBUG, "  green_metadata_num_pictures: %d\n", gm->num_pictures);
++
++        av_log(logctx, AV_LOG_DEBUG, "  SEI GREEN Complexity Metrics: %f %f %f %f\n",
++               (float)gm->percent_non_zero_macroblocks/255,
++               (float)gm->percent_intra_coded_macroblocks/255,
++               (float)gm->percent_six_tap_filtering/255,
++               (float)gm->percent_alpha_point_deblocking_instance/255);
++
++    } else if (gm->green_metadata_type == 1) {
++        av_log(logctx, AV_LOG_DEBUG, "  xsd_metric_type: %d\n", gm->xsd_metric_type);
++
++        if (gm->xsd_metric_type == 0)
++            av_log(logctx, AV_LOG_DEBUG, "  xsd_metric_value: %f\n",
++                   (float)gm->xsd_metric_value/100);
++    }
++}
++
+ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
+ {
+     AVCodecContext *const avctx = h->avctx;
+     unsigned context_count = 0;
+     int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
++    int idr_cleared=0;
+     int i, ret = 0;
++    h->nal_unit_type= 0;
++
++    h->max_contexts = h->nb_slice_ctx;
+     if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
+         h->current_slice = 0;
+         if (!h->first_field)
+             h->cur_pic_ptr = NULL;
+         ff_h264_sei_uninit(&h->sei);
+     }
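++    /* Fix up mislabelled streams: a leading Annex B start code (followed by
++     * an implausible 4-byte length field) clears is_avc, while a plausible
++     * leading NAL length sets it. */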
++    if (h->nal_length_size == 4) {
++        if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
++            h->is_avc = 0;
++        }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
++            h->is_avc = 1;
++    }
++
+     ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
+                                 h->nal_length_size, avctx->codec_id);
+     if (ret < 0) {
+         av_log(avctx, AV_LOG_ERROR,
+                "Error splitting the input into NAL units.\n");
+         return ret;
+     }
+     if (avctx->active_thread_type & FF_THREAD_FRAME)
+         nals_needed = get_last_needed_nal(h);
++    if (nals_needed < 0)
++        return nals_needed;
+     for (i = 0; i < h->pkt.nb_nals; i++) {
+         H2645NAL *nal = &h->pkt.nals[i];
+         H264SliceContext *sl = &h->slice_ctx[context_count];
+         int err;
+         if (avctx->skip_frame >= AVDISCARD_NONREF &&
+             nal->ref_idc == 0 && nal->type != NAL_SEI)
+             continue;
++again:
+         // FIXME these should stop being context-global variables
+         h->nal_ref_idc   = nal->ref_idc;
+         h->nal_unit_type = nal->type;
+         err = 0;
+         switch (nal->type) {
+         case NAL_IDR_SLICE:
++            if ((nal->data[1] & 0xFC) == 0x98) {
++                av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame\n");
++                h->next_outputed_poc = INT_MIN;
++                ret = -1;
++                goto end;
++            }
+             if (nal->type != NAL_IDR_SLICE) {
+                 av_log(h->avctx, AV_LOG_ERROR,
+                        "Invalid mix of idr and non-idr slices\n");
+                 ret = -1;
+                 goto end;
+             }
 -            if (h->sei.recovery_point.recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
 -                h->recovery_frame = (h->poc.frame_num + h->sei.recovery_point.recovery_frame_cnt) &
 -                                    ((1 << h->ps.sps->log2_max_frame_num) - 1);
++            if(!idr_cleared) {
++                if (h->current_slice && (avctx->active_thread_type & FF_THREAD_SLICE)) {
++                    av_log(h, AV_LOG_ERROR, "invalid mixed IDR / non IDR frames cannot be decoded in slice multithreading mode\n");
++                    ret = AVERROR_INVALIDDATA;
++                    goto end;
++                }
++                idr(h); // FIXME ensure we don't lose some frames if there is reordering
++            }
++            idr_cleared = 1;
++            h->has_recovery_point = 1;
+         case NAL_SLICE:
+             sl->gb = nal->gb;
+             if ((err = ff_h264_decode_slice_header(h, sl, nal)))
+                 break;
 -            h->cur_pic_ptr->f->key_frame |=
 -                (nal->type == NAL_IDR_SLICE) || (h->sei.recovery_point.recovery_frame_cnt >= 0);
++            if (h->sei.recovery_point.recovery_frame_cnt >= 0) {
++                const int sei_recovery_frame_cnt = h->sei.recovery_point.recovery_frame_cnt;
++
++                if (h->poc.frame_num != sei_recovery_frame_cnt || sl->slice_type_nos != AV_PICTURE_TYPE_I)
++                    h->valid_recovery_point = 1;
++
++                if (   h->recovery_frame < 0
++                    || av_mod_uintp2(h->recovery_frame - h->poc.frame_num, h->ps.sps->log2_max_frame_num) > sei_recovery_frame_cnt) {
++                    h->recovery_frame = av_mod_uintp2(h->poc.frame_num + sei_recovery_frame_cnt, h->ps.sps->log2_max_frame_num);
++
++                    if (!h->valid_recovery_point)
++                        h->recovery_frame = h->poc.frame_num;
++                }
+             }
 -            if (nal->type == NAL_IDR_SLICE || h->recovery_frame == h->poc.frame_num) {
++            h->cur_pic_ptr->f->key_frame |= (nal->type == NAL_IDR_SLICE);
 -                    (ret = h->avctx->hwaccel->start_frame(h->avctx, NULL, 0)) < 0)
 -                    return ret;
++            if (nal->type == NAL_IDR_SLICE ||
++                (h->recovery_frame == h->poc.frame_num && nal->ref_idc)) {
+                 h->recovery_frame         = -1;
+                 h->cur_pic_ptr->recovered = 1;
+             }
+             // If we have an IDR, all frames after it in decoded order are
+             // "recovered".
+             if (nal->type == NAL_IDR_SLICE)
+                 h->frame_recovered |= FRAME_RECOVERED_IDR;
++#if 1
++            h->cur_pic_ptr->recovered |= h->frame_recovered;
++#else
+             h->cur_pic_ptr->recovered |= !!(h->frame_recovered & FRAME_RECOVERED_IDR);
++#endif
+             if (h->current_slice == 1) {
+                 if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS))
+                     decode_postinit(h, i >= nals_needed);
+                 if (h->avctx->hwaccel &&
 -            if (sl->redundant_pic_count == 0 &&
 -                (avctx->skip_frame < AVDISCARD_NONREF || nal->ref_idc) &&
 -                (avctx->skip_frame < AVDISCARD_BIDIR  ||
 -                 sl->slice_type_nos != AV_PICTURE_TYPE_B) &&
 -                (avctx->skip_frame < AVDISCARD_NONKEY ||
 -                 h->cur_pic_ptr->f->key_frame) &&
 -                avctx->skip_frame < AVDISCARD_ALL) {
++                    (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
++                    goto end;
++#if FF_API_CAP_VDPAU
++                if (CONFIG_H264_VDPAU_DECODER &&
++                    h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
++                    ff_vdpau_h264_picture_start(h);
++#endif
+             }
 -                    ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
++            if (sl->redundant_pic_count == 0) {
+                 if (avctx->hwaccel) {
 -                        return ret;
++                    ret = avctx->hwaccel->decode_slice(avctx,
++                                                       nal->raw_data,
++                                                       nal->raw_size);
+                     if (ret < 0)
 -            ret = AVERROR(ENOSYS);
 -            goto end;
++                        goto end;
++#if FF_API_CAP_VDPAU
++                } else if (CONFIG_H264_VDPAU_DECODER &&
++                           h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
++                    ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
++                                            start_code,
++                                            sizeof(start_code));
++                    ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
++                                            nal->raw_data,
++                                            nal->raw_size);
++#endif
+                 } else
+                     context_count++;
+             }
+             break;
+         case NAL_DPA:
+         case NAL_DPB:
+         case NAL_DPC:
+             avpriv_request_sample(avctx, "data partitioning");
 -        case NAL_SPS:
 -            ret = ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps);
 -            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
 -                goto end;
+             break;
+         case NAL_SEI:
+             ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
++            h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
++            if (avctx->debug & FF_DEBUG_GREEN_MD)
++                debug_green_metadata(&h->sei.green_metadata, h->avctx);
++#if FF_API_AFD
++FF_DISABLE_DEPRECATION_WARNINGS
++            h->avctx->dtg_active_format = h->sei.afd.active_format_description;
++FF_ENABLE_DEPRECATION_WARNINGS
++#endif /* FF_API_AFD */
+             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
+                 goto end;
+             break;
 -        if (context_count == h->nb_slice_ctx) {
++        case NAL_SPS: {
++            GetBitContext tmp_gb = nal->gb;
++            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
++                break;
++            av_log(h->avctx, AV_LOG_DEBUG,
++                   "SPS decoding failure, trying again with the complete NAL\n");
++            init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
++            if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
++                break;
++            ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
+             break;
++        }
+         case NAL_PPS:
+             ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
+                                                        nal->size_bits);
+             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
+                 goto end;
+             break;
+         case NAL_AUD:
+         case NAL_END_SEQUENCE:
+         case NAL_END_STREAM:
+         case NAL_FILLER_DATA:
+         case NAL_SPS_EXT:
+         case NAL_AUXILIARY_SLICE:
+             break;
+         default:
+             av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
+                    nal->type, nal->size_bits);
+         }
 -        if (err < 0) {
 -            av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
++        if (context_count == h->max_contexts) {
+             ret = ff_h264_execute_decode_slices(h, context_count);
+             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
+                 goto end;
+             context_count = 0;
+         }
 -static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
++        if (err < 0 || err == SLICE_SKIPED) {
++            if (err < 0)
++                av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
+             sl->ref_count[0] = sl->ref_count[1] = sl->list_count = 0;
++        } else if (err == SLICE_SINGLETHREAD) {
++            if (context_count > 0) {
++                ret = ff_h264_execute_decode_slices(h, context_count);
++                if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
++                    goto end;
++                context_count = 0;
++            }
++            /* Slice could not be decoded in parallel mode, restart. */
++            sl               = &h->slice_ctx[0];
++            goto again;
+         }
+     }
+     if (context_count) {
+         ret = ff_h264_execute_decode_slices(h, context_count);
+         if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
+             goto end;
+     }
+     ret = 0;
+ end:
++
++#if CONFIG_ERROR_RESILIENCE
++    /*
++     * FIXME: The error handling code does not seem to support interlaced
++     * content when slices span multiple rows.
++     * The ff_er_add_slice calls don't work right for bottom fields; they
++     * cause massive erroneous error concealment. Error marking covers both
++     * fields (top and bottom). This causes a mismatched s->error_count
++     * and a bad error table. Further, the error count goes to INT_MAX
++     * when called for the bottom field, because mb_y is past the end by
++     * one (the caller's fault) and resync_mb_y != 0 causes problems for
++     * the first MB line, too.
++     */
++    if (!FIELD_PICTURE(h) && h->current_slice &&
++        h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
++        h->enable_er) {
++
++        H264SliceContext *sl = h->slice_ctx;
++        int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
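++        /* If the slice has no usable reference in list 0, conceal against the
++         * last decoded picture that was kept around for this purpose. */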
++
++        ff_h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr);
++
++        if (use_last_pic) {
++            ff_h264_set_erpic(&sl->er.last_pic, &h->last_pic_for_ec);
++            sl->ref_list[0][0].parent = &h->last_pic_for_ec;
++            memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
++            memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
++            sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
++        } else if (sl->ref_count[0]) {
++            ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
++        } else
++            ff_h264_set_erpic(&sl->er.last_pic, NULL);
++
++        if (sl->ref_count[1])
++            ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);
++
++        sl->er.ref_count = sl->ref_count[0];
++
++        ff_er_frame_end(&sl->er);
++        if (use_last_pic)
++            memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
++    }
++#endif /* CONFIG_ERROR_RESILIENCE */
+     /* clean up */
+     if (h->cur_pic_ptr && !h->droppable) {
+         ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
+                                   h->picture_structure == PICT_BOTTOM_FIELD);
+     }
+     return (ret < 0) ? ret : buf_size;
+ }
+ /**
+  * Return the number of bytes consumed for building the current frame.
+  */
+ static int get_consumed_bytes(int pos, int buf_size)
+ {
+     if (pos == 0)
+         pos = 1;        // avoid infinite loops (I doubt that is needed but...)
+     if (pos + 10 > buf_size)
+         pos = buf_size; // oops ;)
+     return pos;
+ }
 -    if (!h->ps.sps || !h->ps.sps->crop)
++static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
+ {
++    AVFrame *src = srcp->f;
++    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
+     int i;
+     int ret = av_frame_ref(dst, src);
+     if (ret < 0)
+         return ret;
 -    for (i = 0; i < 3; i++) {
 -        int hshift = (i > 0) ? h->chroma_x_shift : 0;
 -        int vshift = (i > 0) ? h->chroma_y_shift : 0;
 -        int off    = ((h->ps.sps->crop_left >> hshift) << h->pixel_shift) +
 -                     (h->ps.sps->crop_top >> vshift) * dst->linesize[i];
++    av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);
++
++    h->backup_width   = h->avctx->width;
++    h->backup_height  = h->avctx->height;
++    h->backup_pix_fmt = h->avctx->pix_fmt;
++
++    h->avctx->width   = dst->width;
++    h->avctx->height  = dst->height;
++    h->avctx->pix_fmt = dst->format;
++
++    if (srcp->sei_recovery_frame_cnt == 0)
++        dst->key_frame = 1;
++    if (!srcp->crop)
+         return 0;
 -    const uint8_t *new_extradata;
 -    int new_extradata_size;
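++    /* Apply the frame cropping rectangle by offsetting each plane pointer;
++     * chroma planes use the subsampled offsets. */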
++    for (i = 0; i < desc->nb_components; i++) {
++        int hshift = (i > 0) ? desc->log2_chroma_w : 0;
++        int vshift = (i > 0) ? desc->log2_chroma_h : 0;
++        int off    = ((srcp->crop_left >> hshift) << h->pixel_shift) +
++                      (srcp->crop_top  >> vshift) * dst->linesize[i];
+         dst->data[i] += off;
+     }
+     return 0;
+ }
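++/* Check whether the buffer looks like avcC extradata: a run of SPS NALs
++ * (type 7) followed by a run of PPS NALs (type 8), each preceded by a
++ * 16-bit size field. */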
++static int is_extra(const uint8_t *buf, int buf_size)
++{
++    int cnt= buf[5]&0x1f;
++    const uint8_t *p= buf+6;
++    while(cnt--){
++        int nalsize= AV_RB16(p) + 2;
++        if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 7)
++            return 0;
++        p += nalsize;
++    }
++    cnt = *(p++);
++    if(!cnt)
++        return 0;
++    while(cnt--){
++        int nalsize= AV_RB16(p) + 2;
++        if(nalsize > buf_size - (p-buf) || (p[2] & 0x9F) != 8)
++            return 0;
++        p += nalsize;
++    }
++    return 1;
++}
++
+ static int h264_decode_frame(AVCodecContext *avctx, void *data,
+                              int *got_frame, AVPacket *avpkt)
+ {
+     const uint8_t *buf = avpkt->data;
+     int buf_size       = avpkt->size;
+     H264Context *h     = avctx->priv_data;
+     AVFrame *pict      = data;
+     int buf_index      = 0;
++    H264Picture *out;
++    int i, out_idx;
+     int ret;
 -out:
+     h->flags = avctx->flags;
+     h->setup_finished = 0;
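++    /* Restore the codec context dimensions and pixel format that
++     * output_frame() overrode when returning the previous frame. */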
++    if (h->backup_width != -1) {
++        avctx->width    = h->backup_width;
++        h->backup_width = -1;
++    }
++    if (h->backup_height != -1) {
++        avctx->height    = h->backup_height;
++        h->backup_height = -1;
++    }
++    if (h->backup_pix_fmt != AV_PIX_FMT_NONE) {
++        avctx->pix_fmt    = h->backup_pix_fmt;
++        h->backup_pix_fmt = AV_PIX_FMT_NONE;
++    }
++
++    ff_h264_unref_picture(h, &h->last_pic_for_ec);
++
+     /* end of stream, output what is still in the buffers */
 -        H264Picture *out;
 -        int i, out_idx;
+     if (buf_size == 0) {
 -            ret = output_frame(h, pict, out->f);
++ out:
+         h->cur_pic_ptr = NULL;
++        h->first_field = 0;
+         // FIXME factorize this with the output code below
+         out     = h->delayed_pic[0];
+         out_idx = 0;
+         for (i = 1;
+              h->delayed_pic[i] &&
+              !h->delayed_pic[i]->f->key_frame &&
+              !h->delayed_pic[i]->mmco_reset;
+              i++)
+             if (h->delayed_pic[i]->poc < out->poc) {
+                 out     = h->delayed_pic[i];
+                 out_idx = i;
+             }
+         for (i = out_idx; h->delayed_pic[i]; i++)
+             h->delayed_pic[i] = h->delayed_pic[i + 1];
+         if (out) {
 -
 -    new_extradata_size = 0;
 -    new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
 -                                            &new_extradata_size);
 -    if (new_extradata_size > 0 && new_extradata) {
 -        ret = ff_h264_decode_extradata(new_extradata, new_extradata_size,
 -                                       &h->ps, &h->is_avc, &h->nal_length_size,
 -                                       avctx->err_recognition, avctx);
 -        if (ret < 0)
 -            return ret;
++            out->reference &= ~DELAYED_PIC_REF;
++            ret = output_frame(h, pict, out);
+             if (ret < 0)
+                 return ret;
+             *got_frame = 1;
+         }
+         return buf_index;
+     }
 -        buf_size = 0;
++    if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
++        int side_size;
++        uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
++        if (is_extra(side, side_size))
++            ff_h264_decode_extradata(side, side_size,
++                                     &h->ps, &h->is_avc, &h->nal_length_size,
++                                     avctx->err_recognition, avctx);
++    }
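++    /* Some streams carry a raw avcC extradata block in-band; detect it and
++     * parse it as extradata instead of slice data. */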
++    if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
++        if (is_extra(buf, buf_size))
++            return ff_h264_decode_extradata(buf, buf_size,
++                                            &h->ps, &h->is_avc, &h->nal_length_size,
++                                            avctx->err_recognition, avctx);
+     }
+     buf_index = decode_nal_units(h, buf, buf_size);
+     if (buf_index < 0)
+         return AVERROR_INVALIDDATA;
+     if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
 -        if (avctx->skip_frame >= AVDISCARD_NONREF)
 -            return 0;
++        av_assert0(buf_index <= buf_size);
+         goto out;
+     }
+     if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
 -        ff_h264_field_end(h, &h->slice_ctx[0], 0);
++        if (avctx->skip_frame >= AVDISCARD_NONREF ||
++            buf_size >= 4 && !memcmp("Q264", buf, 4))
++            return buf_size;
+         av_log(avctx, AV_LOG_ERROR, "no frame!\n");
+         return AVERROR_INVALIDDATA;
+     }
+     if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
+         (h->mb_y >= h->mb_height && h->mb_height)) {
+         if (avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)
+             decode_postinit(h, 1);
 -            ret = output_frame(h, pict, h->next_output_pic->f);
++        if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
++            return ret;
++        /* Wait for second field. */
+         *got_frame = 0;
+         if (h->next_output_pic && ((avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
++                                   (avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
+                                    h->next_output_pic->recovered)) {
+             if (!h->next_output_pic->recovered)
+                 h->next_output_pic->f->flags |= AV_FRAME_FLAG_CORRUPT;
 -    assert(pict->buf[0] || !*got_frame);
++            if (!h->avctx->hwaccel &&
++                 (h->next_output_pic->field_poc[0] == INT_MAX ||
++                  h->next_output_pic->field_poc[1] == INT_MAX)
++            ) {
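++                /* Only one field of the pair was coded; duplicate its lines
++                 * into the missing field so a full frame can be output. */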
++                int p;
++                AVFrame *f = h->next_output_pic->f;
++                int field = h->next_output_pic->field_poc[0] == INT_MAX;
++                uint8_t *dst_data[4];
++                int linesizes[4];
++                const uint8_t *src_data[4];
++
++                av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);
++
++                for (p = 0; p<4; p++) {
++                    dst_data[p] = f->data[p] + (field^1)*f->linesize[p];
++                    src_data[p] = f->data[p] +  field   *f->linesize[p];
++                    linesizes[p] = 2*f->linesize[p];
++                }
++
++                av_image_copy(dst_data, linesizes, src_data, linesizes,
++                              f->format, f->width, f->height>>1);
++            }
++
++            ret = output_frame(h, pict, h->next_output_pic);
+             if (ret < 0)
+                 return ret;
+             *got_frame = 1;
++            if (CONFIG_MPEGVIDEO) {
++                ff_print_debug_info2(h->avctx, pict, NULL,
++                                    h->next_output_pic->mb_type,
++                                    h->next_output_pic->qscale_table,
++                                    h->next_output_pic->motion_val,
++                                    NULL,
++                                    h->mb_width, h->mb_height, h->mb_stride, 1);
++            }
+         }
+     }
 -    { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VD },
++    av_assert0(pict->buf[0] || !*got_frame);
++
++    ff_h264_unref_picture(h, &h->last_pic_for_ec);
+     return get_consumed_bytes(buf_index, buf_size);
+ }
+ #define OFFSET(x) offsetof(H264Context, x)
+ #define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
+ static const AVOption h264_options[] = {
 -    .class_name = "h264",
++    {"is_avc", "is avc", offsetof(H264Context, is_avc), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, 0},
++    {"nal_length_size", "nal_length_size", offsetof(H264Context, nal_length_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 4, 0},
++    { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_BOOL, { .i64 = -1 }, -1, 1, VD },
+     { NULL },
+ };
+ static const AVClass h264_class = {
++    .class_name = "H264 Decoder",
+     .item_name  = av_default_item_name,
+     .option     = h264_options,
+     .version    = LIBAVUTIL_VERSION_INT,
+ };
+ AVCodec ff_h264_decoder = {
+     .name                  = "h264",
+     .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
+     .type                  = AVMEDIA_TYPE_VIDEO,
+     .id                    = AV_CODEC_ID_H264,
+     .priv_data_size        = sizeof(H264Context),
+     .init                  = ff_h264_decode_init,
+     .close                 = h264_decode_end,
+     .decode                = h264_decode_frame,
+     .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
+                              AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
+                              AV_CODEC_CAP_FRAME_THREADS,
+     .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
+     .flush                 = flush_dpb,
+     .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
+     .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
+     .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
+     .priv_class            = &h264_class,
+ };
++
++#if CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU
++static const AVClass h264_vdpau_class = {
++    .class_name = "H264 VDPAU Decoder",
++    .item_name  = av_default_item_name,
++    .option     = h264_options,
++    .version    = LIBAVUTIL_VERSION_INT,
++};
++
++AVCodec ff_h264_vdpau_decoder = {
++    .name           = "h264_vdpau",
++    .long_name      = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"),
++    .type           = AVMEDIA_TYPE_VIDEO,
++    .id             = AV_CODEC_ID_H264,
++    .priv_data_size = sizeof(H264Context),
++    .init           = ff_h264_decode_init,
++    .close          = h264_decode_end,
++    .decode         = h264_decode_frame,
++    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU,
++    .flush          = flush_dpb,
++    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264,
++                                                     AV_PIX_FMT_NONE},
++    .profiles       = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
++    .priv_class     = &h264_vdpau_class,
++};
++#endif
index 0000000,300077d..009a861
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,938 +1,1008 @@@
 - * This file is part of Libav.
+ /*
+  * H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
+  * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
+  *
 - * Libav is free software; you can redistribute it and/or
++ * This file is part of FFmpeg.
+  *
 - * Libav is distributed in the hope that it will be useful,
++ * FFmpeg is free software; you can redistribute it and/or
+  * modify it under the terms of the GNU Lesser General Public
+  * License as published by the Free Software Foundation; either
+  * version 2.1 of the License, or (at your option) any later version.
+  *
 - * License along with Libav; if not, write to the Free Software
++ * FFmpeg is distributed in the hope that it will be useful,
+  * but WITHOUT ANY WARRANTY; without even the implied warranty of
+  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  * Lesser General Public License for more details.
+  *
+  * You should have received a copy of the GNU Lesser General Public
 -#define H264_MAX_PICTURE_COUNT 32
++ * License along with FFmpeg; if not, write to the Free Software
+  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+  */
+ /**
+  * @file
+  * H.264 / AVC / MPEG-4 part10 codec.
+  * @author Michael Niedermayer <michaelni@gmx.at>
+  */
+ #ifndef AVCODEC_H264DEC_H
+ #define AVCODEC_H264DEC_H
+ #include "libavutil/buffer.h"
+ #include "libavutil/intreadwrite.h"
+ #include "libavutil/thread.h"
+ #include "cabac.h"
+ #include "error_resilience.h"
+ #include "h264_parse.h"
+ #include "h264_sei.h"
+ #include "h2645_parse.h"
+ #include "h264chroma.h"
+ #include "h264dsp.h"
+ #include "h264pred.h"
+ #include "h264qpel.h"
+ #include "internal.h"
+ #include "mpegutils.h"
+ #include "parser.h"
+ #include "qpeldsp.h"
+ #include "rectangle.h"
+ #include "videodsp.h"
 -#define MB_MBAFF(h)    h->mb_mbaff
 -#define MB_FIELD(h)    h->mb_field_decoding_flag
 -#define FRAME_MBAFF(h) h->mb_aff_frame
 -#define FIELD_PICTURE(h) (h->picture_structure != PICT_FRAME)
++#define H264_MAX_PICTURE_COUNT 36
+ #define MAX_SPS_COUNT          32
+ #define MAX_PPS_COUNT         256
+ #define MAX_MMCO_COUNT         66
+ #define MAX_DELAYED_PIC_COUNT  16
++#define MAX_MBPAIR_SIZE (256*1024) // a tighter bound could be calculated if someone cares about a few bytes
++
+ /* Compiling in interlaced support reduces the speed
+  * of progressive decoding by about 2%. */
+ #define ALLOW_INTERLACE
+ #define FMO 0
+ /**
+  * The maximum number of slices supported by the decoder.
+  * Must be a power of 2.
+  */
+ #define MAX_SLICES 32
+ #ifdef ALLOW_INTERLACE
 -#define MB_FIELD(h)      0
++#define MB_MBAFF(h)    (h)->mb_mbaff
++#define MB_FIELD(sl)  (sl)->mb_field_decoding_flag
++#define FRAME_MBAFF(h) (h)->mb_aff_frame
++#define FIELD_PICTURE(h) ((h)->picture_structure != PICT_FRAME)
+ #define LEFT_MBS 2
+ #define LTOP     0
+ #define LBOT     1
+ #define LEFT(i)  (i)
+ #else
+ #define MB_MBAFF(h)      0
 -#define CABAC(h) h->ps.pps->cabac
++#define MB_FIELD(sl)     0
+ #define FRAME_MBAFF(h)   0
+ #define FIELD_PICTURE(h) 0
+ #undef  IS_INTERLACED
+ #define IS_INTERLACED(mb_type) 0
+ #define LEFT_MBS 1
+ #define LTOP     0
+ #define LBOT     0
+ #define LEFT(i)  0
+ #endif
+ #define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
+ #ifndef CABAC
 -#define CHROMA422(h) (h->ps.sps->chroma_format_idc == 2)
 -#define CHROMA444(h) (h->ps.sps->chroma_format_idc == 3)
++#define CABAC(h) (h)->ps.pps->cabac
+ #endif
 -#define QP_MAX_NUM (51 + 2 * 6)           // The maximum supported qp
++#define CHROMA(h)    ((h)->ps.sps->chroma_format_idc)
++#define CHROMA422(h) ((h)->ps.sps->chroma_format_idc == 2)
++#define CHROMA444(h) ((h)->ps.sps->chroma_format_idc == 3)
+ #define EXTENDED_SAR       255
+ #define MB_TYPE_REF0       MB_TYPE_ACPRED // dirty but it fits in 16 bits
+ #define MB_TYPE_8x8DCT     0x01000000
+ #define IS_REF0(a)         ((a) & MB_TYPE_REF0)
+ #define IS_8x8DCT(a)       ((a) & MB_TYPE_8x8DCT)
 -    uint8_t chroma_qp_table[2][64]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
++#define QP_MAX_NUM (51 + 6*6)           // The maximum supported qp
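++// 51 is the maximum QP for 8-bit content; each extra bit of luma depth extends the range by 6, and up to 14-bit content is supported (hence + 6*6).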
+ /* NAL unit types */
+ enum {
+     NAL_SLICE           = 1,
+     NAL_DPA             = 2,
+     NAL_DPB             = 3,
+     NAL_DPC             = 4,
+     NAL_IDR_SLICE       = 5,
+     NAL_SEI             = 6,
+     NAL_SPS             = 7,
+     NAL_PPS             = 8,
+     NAL_AUD             = 9,
+     NAL_END_SEQUENCE    = 10,
+     NAL_END_STREAM      = 11,
+     NAL_FILLER_DATA     = 12,
+     NAL_SPS_EXT         = 13,
+     NAL_AUXILIARY_SLICE = 19,
+ };
+ /**
+  * Sequence parameter set
+  */
+ typedef struct SPS {
+     unsigned int sps_id;
+     int profile_idc;
+     int level_idc;
+     int chroma_format_idc;
+     int transform_bypass;              ///< qpprime_y_zero_transform_bypass_flag
+     int log2_max_frame_num;            ///< log2_max_frame_num_minus4 + 4
+     int poc_type;                      ///< pic_order_cnt_type
+     int log2_max_poc_lsb;              ///< log2_max_pic_order_cnt_lsb_minus4
+     int delta_pic_order_always_zero_flag;
+     int offset_for_non_ref_pic;
+     int offset_for_top_to_bottom_field;
+     int poc_cycle_length;              ///< num_ref_frames_in_pic_order_cnt_cycle
+     int ref_frame_count;               ///< num_ref_frames
+     int gaps_in_frame_num_allowed_flag;
+     int mb_width;                      ///< pic_width_in_mbs_minus1 + 1
+     int mb_height;                     ///< pic_height_in_map_units_minus1 + 1
+     int frame_mbs_only_flag;
+     int mb_aff;                        ///< mb_adaptive_frame_field_flag
+     int direct_8x8_inference_flag;
+     int crop;                          ///< frame_cropping_flag
+     /* those 4 are already in luma samples */
+     unsigned int crop_left;            ///< frame_cropping_rect_left_offset
+     unsigned int crop_right;           ///< frame_cropping_rect_right_offset
+     unsigned int crop_top;             ///< frame_cropping_rect_top_offset
+     unsigned int crop_bottom;          ///< frame_cropping_rect_bottom_offset
+     int vui_parameters_present_flag;
+     AVRational sar;
+     int video_signal_type_present_flag;
+     int full_range;
+     int colour_description_present_flag;
+     enum AVColorPrimaries color_primaries;
+     enum AVColorTransferCharacteristic color_trc;
+     enum AVColorSpace colorspace;
+     int timing_info_present_flag;
+     uint32_t num_units_in_tick;
+     uint32_t time_scale;
+     int fixed_frame_rate_flag;
+     short offset_for_ref_frame[256]; // FIXME dyn alloc?
+     int bitstream_restriction_flag;
+     int num_reorder_frames;
+     int scaling_matrix_present;
+     uint8_t scaling_matrix4[6][16];
+     uint8_t scaling_matrix8[6][64];
+     int nal_hrd_parameters_present_flag;
+     int vcl_hrd_parameters_present_flag;
+     int pic_struct_present_flag;
+     int time_offset_length;
+     int cpb_cnt;                          ///< See H.264 E.1.2
+     int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1
+     int cpb_removal_delay_length;         ///< cpb_removal_delay_length_minus1 + 1
+     int dpb_output_delay_length;          ///< dpb_output_delay_length_minus1 + 1
+     int bit_depth_luma;                   ///< bit_depth_luma_minus8 + 8
+     int bit_depth_chroma;                 ///< bit_depth_chroma_minus8 + 8
+     int residual_color_transform_flag;    ///< residual_colour_transform_flag
+     int constraint_set_flags;             ///< constraint_set[0-3]_flag
++    uint8_t data[4096];
++    size_t data_size;
+ } SPS;
+ /**
+  * Picture parameter set
+  */
+ typedef struct PPS {
+     unsigned int sps_id;
+     int cabac;                  ///< entropy_coding_mode_flag
+     int pic_order_present;      ///< pic_order_present_flag
+     int slice_group_count;      ///< num_slice_groups_minus1 + 1
+     int mb_slice_group_map_type;
+     unsigned int ref_count[2];  ///< num_ref_idx_l0/1_active_minus1 + 1
+     int weighted_pred;          ///< weighted_pred_flag
+     int weighted_bipred_idc;
+     int init_qp;                ///< pic_init_qp_minus26 + 26
+     int init_qs;                ///< pic_init_qs_minus26 + 26
+     int chroma_qp_index_offset[2];
+     int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
+     int constrained_intra_pred;     ///< constrained_intra_pred_flag
+     int redundant_pic_cnt_present;  ///< redundant_pic_cnt_present_flag
+     int transform_8x8_mode;         ///< transform_8x8_mode_flag
+     uint8_t scaling_matrix4[6][16];
+     uint8_t scaling_matrix8[6][64];
 -    // FIXME this should properly be const
 -    SPS *sps;
++    uint8_t chroma_qp_table[2][QP_MAX_NUM+1];  ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
+     int chroma_qp_diff;
++    uint8_t data[4096];
++    size_t data_size;
+     uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
+     uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
+     uint32_t(*dequant4_coeff[6])[16];
+     uint32_t(*dequant8_coeff[6])[64];
+ } PPS;
+ typedef struct H264ParamSets {
+     AVBufferRef *sps_list[MAX_SPS_COUNT];
+     AVBufferRef *pps_list[MAX_PPS_COUNT];
++    AVBufferRef *pps_ref;
++    AVBufferRef *sps_ref;
+     /* currently active parameters sets */
+     const PPS *pps;
 -    int ref_poc[2][2][32];  ///< POCs of the frames used as reference (FIXME need per slice)
++    const SPS *sps;
+ } H264ParamSets;
+ /**
+  * Memory management control operation opcode.
+  */
+ typedef enum MMCOOpcode {
+     MMCO_END = 0,
+     MMCO_SHORT2UNUSED,
+     MMCO_LONG2UNUSED,
+     MMCO_SHORT2LONG,
+     MMCO_SET_MAX_LONG,
+     MMCO_RESET,
+     MMCO_LONG,
+ } MMCOOpcode;
+ /**
+  * Memory management control operation.
+  */
+ typedef struct MMCO {
+     MMCOOpcode opcode;
+     int short_pic_num;  ///< pic_num without wrapping (pic_num & max_pic_num)
+     int long_arg;       ///< index, pic_num, or num long refs depending on opcode
+ } MMCO;
+ typedef struct H264Picture {
+     AVFrame *f;
+     ThreadFrame tf;
+     AVBufferRef *qscale_table_buf;
+     int8_t *qscale_table;
+     AVBufferRef *motion_val_buf[2];
+     int16_t (*motion_val[2])[2];
+     AVBufferRef *mb_type_buf;
+     uint32_t *mb_type;
+     AVBufferRef *hwaccel_priv_buf;
+     void *hwaccel_picture_private; ///< hardware accelerator private data
+     AVBufferRef *ref_index_buf[2];
+     int8_t *ref_index[2];
+     int field_poc[2];       ///< top/bottom POC
+     int poc;                ///< frame POC
+     int frame_num;          ///< frame_num (raw frame_num from slice header)
+     int mmco_reset;         /**< MMCO_RESET set this 1. Reordering code must
+                                  not mix pictures before and after MMCO_RESET. */
+     int pic_id;             /**< pic_num (short -> no wrap version of pic_num,
+                                  pic_num & max_pic_num; long -> long_pic_num) */
+     int long_ref;           ///< 1->long term reference 0->short term reference
 -        uint8_t val;
++    int ref_poc[2][2][32];  ///< POCs of the frames/fields used as reference (FIXME need per slice)
+     int ref_count[2][2];    ///< number of entries in ref_poc         (FIXME need per slice)
+     int mbaff;              ///< 1 -> MBAFF frame 0-> not MBAFF
+     int field_picture;      ///< whether or not picture was encoded in separate fields
+     int reference;
+     int recovered;          ///< picture at IDR or recovery point + recovery count
++    int invalid_gap;
++    int sei_recovery_frame_cnt;
++
++    int crop;
++    int crop_left;
++    int crop_top;
+ } H264Picture;
+ typedef struct H264Ref {
+     uint8_t *data[3];
+     int linesize[3];
+     int reference;
+     int poc;
+     int pic_id;
+     H264Picture *parent;
+ } H264Ref;
+ typedef struct H264SliceContext {
+     struct H264Context *h264;
+     GetBitContext gb;
+     ERContext er;
+     int slice_num;
+     int slice_type;
+     int slice_type_nos;         ///< S free slice type (SI/SP are remapped to I/P)
+     int slice_type_fixed;
+     int qscale;
+     int chroma_qp[2];   // QPc
+     int qp_thresh;      ///< QP threshold to skip loopfilter
+     int last_qscale_diff;
+     // deblock
+     int deblocking_filter;          ///< disable_deblocking_filter_idc with 1 <-> 0
+     int slice_alpha_c0_offset;
+     int slice_beta_offset;
+     H264PredWeightTable pwt;
+     int prev_mb_skipped;
+     int next_mb_skipped;
+     int chroma_pred_mode;
+     int intra16x16_pred_mode;
+     int8_t intra4x4_pred_mode_cache[5 * 8];
+     int8_t(*intra4x4_pred_mode);
+     int topleft_mb_xy;
+     int top_mb_xy;
+     int topright_mb_xy;
+     int left_mb_xy[LEFT_MBS];
+     int topleft_type;
+     int top_type;
+     int topright_type;
+     int left_type[LEFT_MBS];
+     const uint8_t *left_block;
+     int topleft_partition;
+     unsigned int topleft_samples_available;
+     unsigned int top_samples_available;
+     unsigned int topright_samples_available;
+     unsigned int left_samples_available;
+     ptrdiff_t linesize, uvlinesize;
+     ptrdiff_t mb_linesize;  ///< may be equal to s->linesize or s->linesize * 2, for mbaff
+     ptrdiff_t mb_uvlinesize;
+     int mb_x, mb_y;
+     int mb_xy;
+     int resync_mb_x;
+     int resync_mb_y;
+     unsigned int first_mb_addr;
+     // index of the first MB of the next slice
+     int next_slice_idx;
+     int mb_skip_run;
+     int is_complex;
+     int picture_structure;
+     int mb_field_decoding_flag;
+     int mb_mbaff;               ///< mb_aff_frame && mb_field_decoding_flag
+     int redundant_pic_count;
+     /**
+      * number of neighbors (top and/or left) that used 8x8 dct
+      */
+     int neighbor_transform_size;
+     int direct_spatial_mv_pred;
+     int col_parity;
+     int col_fieldoff;
+     int cbp;
+     int top_cbp;
+     int left_cbp;
+     int dist_scale_factor[32];
+     int dist_scale_factor_field[2][32];
+     int map_col_to_list0[2][16 + 32];
+     int map_col_to_list0_field[2][2][16 + 32];
+     /**
+      * num_ref_idx_l0/1_active_minus1 + 1
+      */
+     unsigned int ref_count[2];          ///< counts frames or fields, depending on current mb mode
+     unsigned int list_count;
+     H264Ref ref_list[2][48];        /**< 0..15: frame refs, 16..47: mbaff field refs.
+                                          *   Reordered version of default_ref_list
+                                          *   according to picture reordering in slice header */
+     struct {
+         uint8_t op;
 -    const uint8_t *zigzag_scan_q0;
 -    const uint8_t *zigzag_scan8x8_q0;
 -    const uint8_t *zigzag_scan8x8_cavlc_q0;
 -    const uint8_t *field_scan_q0;
 -    const uint8_t *field_scan8x8_q0;
 -    const uint8_t *field_scan8x8_cavlc_q0;
++        uint32_t val;
+     } ref_modifications[2][32];
+     int nb_ref_modifications[2];
+     unsigned int pps_id;
+     const uint8_t *intra_pcm_ptr;
+     int16_t *dc_val_base;
+     uint8_t *bipred_scratchpad;
+     uint8_t *edge_emu_buffer;
+     uint8_t (*top_borders[2])[(16 * 3) * 2];
+     int bipred_scratchpad_allocated;
+     int edge_emu_buffer_allocated;
+     int top_borders_allocated[2];
+     /**
+      * non zero coeff count cache.
+      * is 64 if not available.
+      */
+     DECLARE_ALIGNED(8, uint8_t, non_zero_count_cache)[15 * 8];
+     /**
+      * Motion vector cache.
+      */
+     DECLARE_ALIGNED(16, int16_t, mv_cache)[2][5 * 8][2];
+     DECLARE_ALIGNED(8,  int8_t, ref_cache)[2][5 * 8];
+     DECLARE_ALIGNED(16, uint8_t, mvd_cache)[2][5 * 8][2];
+     uint8_t direct_cache[5 * 8];
+     DECLARE_ALIGNED(8, uint16_t, sub_mb_type)[4];
+     ///< as a DCT coefficient is int32_t in high depth, we need to reserve twice the space.
+     DECLARE_ALIGNED(16, int16_t, mb)[16 * 48 * 2];
+     DECLARE_ALIGNED(16, int16_t, mb_luma_dc)[3][16 * 2];
+     ///< as mb is addressed by scantable[i] and scantable is uint8_t we can either
+     ///< check that i is not too large or ensure that there is some unused stuff after mb
+     int16_t mb_padding[256 * 2];
+     uint8_t (*mvd_table[2])[2];
+     /**
+      * Cabac
+      */
+     CABACContext cabac;
+     uint8_t cabac_state[1024];
+     int cabac_init_idc;
+     MMCO mmco[MAX_MMCO_COUNT];
+     int  nb_mmco;
+     int explicit_ref_marking;
+     int frame_num;
+     int poc_lsb;
+     int delta_poc_bottom;
+     int delta_poc[2];
+     int curr_pic_num;
+     int max_pic_num;
+ } H264SliceContext;
+ /**
+  * H264Context
+  */
+ typedef struct H264Context {
+     const AVClass *class;
+     AVCodecContext *avctx;
+     VideoDSPContext vdsp;
+     H264DSPContext h264dsp;
+     H264ChromaContext h264chroma;
+     H264QpelContext h264qpel;
+     H264Picture DPB[H264_MAX_PICTURE_COUNT];
+     H264Picture *cur_pic_ptr;
+     H264Picture cur_pic;
++    H264Picture last_pic_for_ec;
+     H264SliceContext *slice_ctx;
+     int            nb_slice_ctx;
+     H2645Packet pkt;
+     int pixel_shift;    ///< 0 for 8-bit H.264, 1 for high-bit-depth H.264
+     /* coded dimensions -- 16 * mb w/h */
+     int width, height;
+     int chroma_x_shift, chroma_y_shift;
++    /**
++     * Backup frame properties: needed, because they can be different
++     * between returned frame and last decoded frame.
++     **/
++    int backup_width;
++    int backup_height;
++    enum AVPixelFormat backup_pix_fmt;
++
+     int droppable;
+     int coded_picture_number;
+     int context_initialized;
+     int flags;
+     int workaround_bugs;
+     /* Set when slice threading is used and at least one slice uses deblocking
+      * mode 1 (i.e. across slice boundaries). Then we disable the loop filter
+      * during normal MB decoding and execute it serially at the end.
+      */
+     int postpone_filter;
+     int8_t(*intra4x4_pred_mode);
+     H264PredContext hpc;
+     uint8_t (*non_zero_count)[48];
+ #define LIST_NOT_USED -1 // FIXME rename?
+ #define PART_NOT_AVAILABLE -2
+     /**
+      * block_offset[ 0..23] for frame macroblocks
+      * block_offset[24..47] for field macroblocks
+      */
+     int block_offset[2 * (16 * 3)];
+     uint32_t *mb2b_xy;  // FIXME are these 4 a good idea?
+     uint32_t *mb2br_xy;
+     int b_stride;       // FIXME use s->b4_stride
+     uint16_t *slice_table;      ///< slice_table_base + 2*mb_stride + 1
+     // interlacing specific flags
+     int mb_aff_frame;
+     int picture_structure;
+     int first_field;
+     uint8_t *list_counts;               ///< Array of list_count per MB specifying the slice type
+     /* 0x100 -> non null luma_dc, 0x80/0x40 -> non null chroma_dc (cb/cr), 0x?0 -> chroma_cbp(0, 1, 2), 0x0? luma_cbp */
+     uint16_t *cbp_table;
+     /* chroma_pred_mode for i4x4 or i16x16, else 0 */
+     uint8_t *chroma_pred_mode_table;
+     uint8_t (*mvd_table[2])[2];
+     uint8_t *direct_table;
+     uint8_t zigzag_scan[16];
+     uint8_t zigzag_scan8x8[64];
+     uint8_t zigzag_scan8x8_cavlc[64];
+     uint8_t field_scan[16];
+     uint8_t field_scan8x8[64];
+     uint8_t field_scan8x8_cavlc[64];
 -                                     H264ParamSets *ps);
++    uint8_t zigzag_scan_q0[16];
++    uint8_t zigzag_scan8x8_q0[64];
++    uint8_t zigzag_scan8x8_cavlc_q0[64];
++    uint8_t field_scan_q0[16];
++    uint8_t field_scan8x8_q0[64];
++    uint8_t field_scan8x8_cavlc_q0[64];
+     int mb_y;
+     int mb_height, mb_width;
+     int mb_stride;
+     int mb_num;
+     // =============================================================
+     // Things below are not used in the MB or more inner code
+     int nal_ref_idc;
+     int nal_unit_type;
+     /**
+      * Used to parse AVC variant of H.264
+      */
+     int is_avc;           ///< this flag is != 0 if codec is avc1
+     int nal_length_size;  ///< Number of bytes used for nal length (1, 2 or 4)
+     int bit_depth_luma;         ///< luma bit depth from sps to detect changes
+     int chroma_format_idc;      ///< chroma format from sps to detect changes
+     H264ParamSets ps;
+     uint16_t *slice_table_base;
+     H264POCContext poc;
++    H264Ref default_ref[2];
+     H264Picture *short_ref[32];
+     H264Picture *long_ref[32];
+     H264Picture *delayed_pic[MAX_DELAYED_PIC_COUNT + 2]; // FIXME size?
+     int last_pocs[MAX_DELAYED_PIC_COUNT];
+     H264Picture *next_output_pic;
+     int next_outputed_poc;
+     /**
+      * memory management control operations buffer.
+      */
+     MMCO mmco[MAX_MMCO_COUNT];
+     int  nb_mmco;
+     int mmco_reset;
+     int explicit_ref_marking;
+     int long_ref_count;     ///< number of actual long term references
+     int short_ref_count;    ///< number of actual short term references
+     /**
+      * @name Members for slice based multithreading
+      * @{
+      */
+     /**
+      * current slice number, used to initialize slice_num of each thread/context
+      */
+     int current_slice;
++    /**
++     * Max number of threads / contexts.
++     * This is equal to AVCodecContext.thread_count unless
++     * multithreaded decoding is impossible, in which case it is
++     * reduced to 1.
++     */
++    int max_contexts;
++
++    /**
++     *  1 if the single thread fallback warning has already been
++     *  displayed, 0 otherwise.
++     */
++    int single_decode_warning;
++
+     /** @} */
+     /**
+      * Complement sei_pic_struct
+      * SEI_PIC_STRUCT_TOP_BOTTOM and SEI_PIC_STRUCT_BOTTOM_TOP indicate interlaced frames.
+      * However, soft telecined frames may have these values.
+      * This is used in an attempt to flag soft telecine progressive.
+      */
+     int prev_interlaced_frame;
+     /**
++     * Whether the SEI recovery points look valid.
++     */
++    int valid_recovery_point;
++
++    /**
+      * recovery_frame is the frame_num at which the next frame should
+      * be fully constructed.
+      *
+      * Set to -1 when not expecting a recovery point.
+      */
+     int recovery_frame;
+ /**
+  * We have seen an IDR, so all the following frames in coded order are correctly
+  * decodable.
+  */
+ #define FRAME_RECOVERED_IDR  (1 << 0)
+ /**
+  * Sufficient number of frames have been decoded since a SEI recovery point,
+  * so all the following frames in presentation order are correct.
+  */
+ #define FRAME_RECOVERED_SEI  (1 << 1)
+     int frame_recovered;    ///< Initial frame has been completely recovered
++    int has_recovery_point;
++
++    int missing_fields;
++
+     /* for frame threading, this is set to 1
+      * after finish_setup() has been called, so we cannot modify
+      * some context properties (which are supposed to stay constant between
+      * slices) anymore */
+     int setup_finished;
++    int cur_chroma_format_idc;
++    int cur_bit_depth_luma;
++    int16_t slice_row[MAX_SLICES]; ///< to detect when MAX_SLICES is too low
++
+     int enable_er;
+     H264SEIContext sei;
+     AVBufferPool *qscale_table_pool;
+     AVBufferPool *mb_type_pool;
+     AVBufferPool *motion_val_pool;
+     AVBufferPool *ref_index_pool;
+     int ref2frm[MAX_SLICES][2][64];     ///< reference to frame number lists, used in the loop filter, the first 2 are for -2,-1
+ } H264Context;
+ extern const uint16_t ff_h264_mb_sizes[4];
+ /**
++ * Uninit H264 param sets structure.
++ */
++
++void ff_h264_ps_uninit(H264ParamSets *ps);
++
++/**
+  * Decode SPS
+  */
+ int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
 -int ff_h264_build_ref_list(const H264Context *h, H264SliceContext *sl);
++                                     H264ParamSets *ps, int ignore_truncation);
+ /**
+  * Decode PPS
+  */
+ int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
+                                          H264ParamSets *ps, int bit_length);
+ /**
+  * Reconstruct bitstream slice_type.
+  */
+ int ff_h264_get_slice_type(const H264SliceContext *sl);
+ /**
+  * Allocate tables.
+  * needs width/height
+  */
+ int ff_h264_alloc_tables(H264Context *h);
+ int ff_h264_decode_ref_pic_list_reordering(const H264Context *h, H264SliceContext *sl);
 -void ff_h264_init_dequant_tables(H264Context *h);
 -
++int ff_h264_build_ref_list(H264Context *h, H264SliceContext *sl);
+ void ff_h264_remove_all_refs(H264Context *h);
+ /**
+  * Execute the reference picture marking (memory management control operations).
+  */
+ int ff_h264_execute_ref_pic_marking(H264Context *h);
+ int ff_h264_decode_ref_pic_marking(const H264Context *h, H264SliceContext *sl,
+                                    GetBitContext *gb);
+ void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl);
+ int ff_h264_decode_init(AVCodecContext *avctx);
+ void ff_h264_decode_init_vlc(void);
+ /**
+  * Decode a macroblock
+  * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
+  */
+ int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl);
+ /**
+  * Decode a CABAC coded macroblock
+  * @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
+  */
+ int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl);
+ void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl);
 -static av_always_inline uint32_t pack16to32(int a, int b)
+ void ff_h264_direct_dist_scale_factor(const H264Context *const h, H264SliceContext *sl);
+ void ff_h264_direct_ref_list_init(const H264Context *const h, H264SliceContext *sl);
+ void ff_h264_pred_direct_motion(const H264Context *const h, H264SliceContext *sl,
+                                 int *mb_type);
+ void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
+                             uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
+                             unsigned int linesize, unsigned int uvlinesize);
+ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl, int mb_x, int mb_y,
+                        uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
+                        unsigned int linesize, unsigned int uvlinesize);
+ /*
+  * o-o o-o
+  *  / / /
+  * o-o o-o
+  *  ,---'
+  * o-o o-o
+  *  / / /
+  * o-o o-o
+  */
+ /* Scan8 organization:
+  *    0 1 2 3 4 5 6 7
+  * 0  DY    y y y y y
+  * 1        y Y Y Y Y
+  * 2        y Y Y Y Y
+  * 3        y Y Y Y Y
+  * 4        y Y Y Y Y
+  * 5  DU    u u u u u
+  * 6        u U U U U
+  * 7        u U U U U
+  * 8        u U U U U
+  * 9        u U U U U
+  * 10 DV    v v v v v
+  * 11       v V V V V
+  * 12       v V V V V
+  * 13       v V V V V
+  * 14       v V V V V
+  * DY/DU/DV are for luma/chroma DC.
+  */
+ #define LUMA_DC_BLOCK_INDEX   48
+ #define CHROMA_DC_BLOCK_INDEX 49
+ // This table must be here because scan8[constant] must be known at compile time
+ static const uint8_t scan8[16 * 3 + 3] = {
+     4 +  1 * 8, 5 +  1 * 8, 4 +  2 * 8, 5 +  2 * 8,
+     6 +  1 * 8, 7 +  1 * 8, 6 +  2 * 8, 7 +  2 * 8,
+     4 +  3 * 8, 5 +  3 * 8, 4 +  4 * 8, 5 +  4 * 8,
+     6 +  3 * 8, 7 +  3 * 8, 6 +  4 * 8, 7 +  4 * 8,
+     4 +  6 * 8, 5 +  6 * 8, 4 +  7 * 8, 5 +  7 * 8,
+     6 +  6 * 8, 7 +  6 * 8, 6 +  7 * 8, 7 +  7 * 8,
+     4 +  8 * 8, 5 +  8 * 8, 4 +  9 * 8, 5 +  9 * 8,
+     6 +  8 * 8, 7 +  8 * 8, 6 +  9 * 8, 7 +  9 * 8,
+     4 + 11 * 8, 5 + 11 * 8, 4 + 12 * 8, 5 + 12 * 8,
+     6 + 11 * 8, 7 + 11 * 8, 6 + 12 * 8, 7 + 12 * 8,
+     4 + 13 * 8, 5 + 13 * 8, 4 + 14 * 8, 5 + 14 * 8,
+     6 + 13 * 8, 7 + 13 * 8, 6 + 14 * 8, 7 + 14 * 8,
+     0 +  0 * 8, 0 +  5 * 8, 0 + 10 * 8
+ };
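
The scan8[] table above, together with the layout sketched in the Scan8 comment, maps each block index to a position in the 8-entries-per-row caches (non_zero_count_cache, mv_cache, ...). As a standalone illustration, not part of this change, the low three bits of a scan8 value are the cache column and the remaining bits the cache row:

#include <stdio.h>

/* Minimal sketch: decode a scan8-style index into (column, row) inside an
 * 8-wide cache row, matching the "col + row * 8" construction used above. */
static void scan8_to_xy(unsigned scan8_val, unsigned *x, unsigned *y)
{
    *x = scan8_val & 7;   /* column within the cache row */
    *y = scan8_val >> 3;  /* cache row */
}

int main(void)
{
    unsigned x, y;
    scan8_to_xy(4 + 1 * 8, &x, &y);        /* first luma 4x4 block in the table */
    printf("column %u, row %u\n", x, y);   /* prints: column 4, row 1 */
    return 0;
}
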
 -static av_always_inline uint16_t pack8to16(int a, int b)
++static av_always_inline uint32_t pack16to32(unsigned a, unsigned b)
+ {
+ #if HAVE_BIGENDIAN
+     return (b & 0xFFFF) + (a << 16);
+ #else
+     return (a & 0xFFFF) + (b << 16);
+ #endif
+ }
++static av_always_inline uint16_t pack8to16(unsigned a, unsigned b)
+ {
+ #if HAVE_BIGENDIAN
+     return (b & 0xFF) + (a << 8);
+ #else
+     return (a & 0xFF) + (b << 8);
+ #endif
+ }
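
pack16to32()/pack8to16() above exist so that two small fields, for example the x and y components of a motion vector, can be written to a cache with a single aligned store. A little-endian-only standalone sketch of the round trip (hypothetical demo code, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Little-endian variant of pack16to32: combine two 16-bit values so one
 * 32-bit store can fill a cache entry; inputs are assumed to fit in 16 bits. */
static uint32_t pack16to32_le(unsigned a, unsigned b)
{
    return (a & 0xFFFF) + (b << 16);
}

int main(void)
{
    int16_t mv_x = -3, mv_y = 7;                    /* example motion vector */
    uint32_t packed = pack16to32_le((uint16_t)mv_x, (uint16_t)mv_y);
    int16_t out_x = (int16_t)(packed & 0xFFFF);     /* unpack x */
    int16_t out_y = (int16_t)(packed >> 16);        /* unpack y */
    printf("0x%08x -> (%d, %d)\n", (unsigned)packed, out_x, out_y); /* (-3, 7) */
    return 0;
}
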
+ /**
+  * Get the chroma qp.
+  */
+ static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
+ {
+     return pps->chroma_qp_table[t][qscale];
+ }
+ /**
+  * Get the predicted intra4x4 prediction mode.
+  */
+ static av_always_inline int pred_intra_mode(const H264Context *h,
+                                             H264SliceContext *sl, int n)
+ {
+     const int index8 = scan8[n];
+     const int left   = sl->intra4x4_pred_mode_cache[index8 - 1];
+     const int top    = sl->intra4x4_pred_mode_cache[index8 - 8];
+     const int min    = FFMIN(left, top);
+     ff_tlog(h->avctx, "mode:%d %d min:%d\n", left, top, min);
+     if (min < 0)
+         return DC_PRED;
+     else
+         return min;
+ }
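
The rule in pred_intra_mode() is small but easy to misread: the prediction is the minimum of the left and top neighbours' modes, and an unavailable neighbour (stored as a negative value in the cache) forces the DC fallback. A self-contained sketch of just that rule, with the DC_PRED value assumed to be 2 as in H.264 intra prediction:

#include <stdio.h>

#define DC_PRED 2   /* assumed value of the DC intra prediction mode */

/* Predicted intra4x4 mode: min(left, top), or DC when a neighbour is missing. */
static int predict_intra4x4_mode(int left, int top)
{
    int min = left < top ? left : top;
    return min < 0 ? DC_PRED : min;
}

int main(void)
{
    printf("%d\n", predict_intra4x4_mode(1, 3));   /* both available -> 1 */
    printf("%d\n", predict_intra4x4_mode(-1, 3));  /* left missing   -> 2 (DC) */
    return 0;
}
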
+ static av_always_inline void write_back_intra_pred_mode(const H264Context *h,
+                                                         H264SliceContext *sl)
+ {
+     int8_t *i4x4       = sl->intra4x4_pred_mode + h->mb2br_xy[sl->mb_xy];
+     int8_t *i4x4_cache = sl->intra4x4_pred_mode_cache;
+     AV_COPY32(i4x4, i4x4_cache + 4 + 8 * 4);
+     i4x4[4] = i4x4_cache[7 + 8 * 3];
+     i4x4[5] = i4x4_cache[7 + 8 * 2];
+     i4x4[6] = i4x4_cache[7 + 8 * 1];
+ }
+ static av_always_inline void write_back_non_zero_count(const H264Context *h,
+                                                        H264SliceContext *sl)
+ {
+     const int mb_xy    = sl->mb_xy;
+     uint8_t *nnz       = h->non_zero_count[mb_xy];
+     uint8_t *nnz_cache = sl->non_zero_count_cache;
+     AV_COPY32(&nnz[ 0], &nnz_cache[4 + 8 * 1]);
+     AV_COPY32(&nnz[ 4], &nnz_cache[4 + 8 * 2]);
+     AV_COPY32(&nnz[ 8], &nnz_cache[4 + 8 * 3]);
+     AV_COPY32(&nnz[12], &nnz_cache[4 + 8 * 4]);
+     AV_COPY32(&nnz[16], &nnz_cache[4 + 8 * 6]);
+     AV_COPY32(&nnz[20], &nnz_cache[4 + 8 * 7]);
+     AV_COPY32(&nnz[32], &nnz_cache[4 + 8 * 11]);
+     AV_COPY32(&nnz[36], &nnz_cache[4 + 8 * 12]);
+     if (!h->chroma_y_shift) {
+         AV_COPY32(&nnz[24], &nnz_cache[4 + 8 * 8]);
+         AV_COPY32(&nnz[28], &nnz_cache[4 + 8 * 9]);
+         AV_COPY32(&nnz[40], &nnz_cache[4 + 8 * 13]);
+         AV_COPY32(&nnz[44], &nnz_cache[4 + 8 * 14]);
+     }
+ }
+ static av_always_inline void write_back_motion_list(const H264Context *h,
+                                                     H264SliceContext *sl,
+                                                     int b_stride,
+                                                     int b_xy, int b8_xy,
+                                                     int mb_type, int list)
+ {
+     int16_t(*mv_dst)[2] = &h->cur_pic.motion_val[list][b_xy];
+     int16_t(*mv_src)[2] = &sl->mv_cache[list][scan8[0]];
+     AV_COPY128(mv_dst + 0 * b_stride, mv_src + 8 * 0);
+     AV_COPY128(mv_dst + 1 * b_stride, mv_src + 8 * 1);
+     AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
+     AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
+     if (CABAC(h)) {
+         uint8_t (*mvd_dst)[2] = &sl->mvd_table[list][FMO ? 8 * sl->mb_xy
+                                                         : h->mb2br_xy[sl->mb_xy]];
+         uint8_t(*mvd_src)[2]  = &sl->mvd_cache[list][scan8[0]];
+         if (IS_SKIP(mb_type)) {
+             AV_ZERO128(mvd_dst);
+         } else {
+             AV_COPY64(mvd_dst, mvd_src + 8 * 3);
+             AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
+             AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
+             AV_COPY16(mvd_dst + 3 + 1, mvd_src + 3 + 8 * 2);
+         }
+     }
+     {
+         int8_t *ref_index = &h->cur_pic.ref_index[list][b8_xy];
+         int8_t *ref_cache = sl->ref_cache[list];
+         ref_index[0 + 0 * 2] = ref_cache[scan8[0]];
+         ref_index[1 + 0 * 2] = ref_cache[scan8[4]];
+         ref_index[0 + 1 * 2] = ref_cache[scan8[8]];
+         ref_index[1 + 1 * 2] = ref_cache[scan8[12]];
+     }
+ }
+ static av_always_inline void write_back_motion(const H264Context *h,
+                                                H264SliceContext *sl,
+                                                int mb_type)
+ {
+     const int b_stride      = h->b_stride;
+     const int b_xy  = 4 * sl->mb_x + 4 * sl->mb_y * h->b_stride; // try mb2b(8)_xy
+     const int b8_xy = 4 * sl->mb_xy;
+     if (USES_LIST(mb_type, 0)) {
+         write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 0);
+     } else {
+         fill_rectangle(&h->cur_pic.ref_index[0][b8_xy],
+                        2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
+     }
+     if (USES_LIST(mb_type, 1))
+         write_back_motion_list(h, sl, b_stride, b_xy, b8_xy, mb_type, 1);
+     if (sl->slice_type_nos == AV_PICTURE_TYPE_B && CABAC(h)) {
+         if (IS_8X8(mb_type)) {
+             uint8_t *direct_table = &h->direct_table[4 * sl->mb_xy];
+             direct_table[1] = sl->sub_mb_type[1] >> 1;
+             direct_table[2] = sl->sub_mb_type[2] >> 1;
+             direct_table[3] = sl->sub_mb_type[3] >> 1;
+         }
+     }
+ }
+ static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
+ {
+     if (h->ps.sps->direct_8x8_inference_flag)
+         return !(AV_RN64A(sl->sub_mb_type) &
+                  ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
+                   0x0001000100010001ULL));
+     else
+         return !(AV_RN64A(sl->sub_mb_type) &
+                  ((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8 | MB_TYPE_DIRECT2) *
+                   0x0001000100010001ULL));
+ }
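
get_dct8x8_allowed() relies on a bit trick: multiplying a 16-bit flag mask by 0x0001000100010001 replicates it into all four 16-bit lanes of a 64-bit word, so a single AND tests the same flags in all four packed sub_mb_type entries at once. A standalone sketch of the idea, using a hypothetical flag value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t replicate = 0x0001000100010001ULL;
    const uint16_t FLAG = 0x0008;               /* hypothetical partition flag */
    uint16_t sub_type[4] = { 0x0000, FLAG, 0x0000, 0x0000 };

    /* pack the four 16-bit sub-block types into one 64-bit word */
    uint64_t packed = (uint64_t)sub_type[0]
                    | (uint64_t)sub_type[1] << 16
                    | (uint64_t)sub_type[2] << 32
                    | (uint64_t)sub_type[3] << 48;

    /* one AND against the replicated mask tests all four lanes */
    int any_set = (packed & (FLAG * replicate)) != 0;
    printf("flag set in some sub-block: %s\n", any_set ? "yes" : "no");
    return 0;
}
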
++static inline int find_start_code(const uint8_t *buf, int buf_size,
++                           int buf_index, int next_avc)
++{
++    uint32_t state = -1;
++
++    buf_index = avpriv_find_start_code(buf + buf_index, buf + next_avc + 1, &state) - buf - 1;
++
++    return FFMIN(buf_index, buf_size);
++}
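
find_start_code() above delegates the scan to avpriv_find_start_code(); as a rough, dependency-free illustration of what an Annex B start-code search does (not the implementation used here), a naive scan for the three-byte 00 00 01 sequence looks like this:

#include <stdint.h>
#include <stdio.h>

/* Return the index one past the next 00 00 01 start code, or buf_size if
 * none is found; buf_size and start are byte offsets into buf. */
static int next_start_code(const uint8_t *buf, int buf_size, int start)
{
    for (int i = start; i + 2 < buf_size; i++)
        if (buf[i] == 0 && buf[i + 1] == 0 && buf[i + 2] == 1)
            return i + 3;             /* first byte of the NAL unit */
    return buf_size;
}

int main(void)
{
    const uint8_t annexb[] = { 0x00, 0x00, 0x00, 0x01, 0x67, 0x42 };
    printf("NAL starts at %d\n",
           next_start_code(annexb, (int)sizeof(annexb), 0)); /* prints 4 */
    return 0;
}
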
++
+ int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup);
+ int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src);
+ void ff_h264_unref_picture(H264Context *h, H264Picture *pic);
+ int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl);
+ void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl, int y, int height);
+ int ff_h264_decode_slice_header(H264Context *h, H264SliceContext *sl,
+                                 const H2645NAL *nal);
++#define SLICE_SINGLETHREAD 1
++#define SLICE_SKIPED 2
++
+ int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count);
+ int ff_h264_update_thread_context(AVCodecContext *dst,
+                                   const AVCodecContext *src);
+ void ff_h264_flush_change(H264Context *h);
+ void ff_h264_free_tables(H264Context *h);
++void ff_h264_set_erpic(ERPicture *dst, H264Picture *src);
++
+ #endif /* AVCODEC_H264DEC_H */
Simple merge
index caeb64b,0000000..18f186b
mode 100644,000000..100644
--- /dev/null
@@@ -1,371 -1,0 +1,371 @@@
- #include "h264.h"
 +/*
 + * Android MediaCodec H.264 decoder
 + *
 + * Copyright (c) 2015-2016 Matthieu Bouron <matthieu.bouron stupeflix.com>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#include <stdint.h>
 +#include <string.h>
 +
 +#include "libavutil/avassert.h"
 +#include "libavutil/common.h"
 +#include "libavutil/fifo.h"
 +#include "libavutil/opt.h"
 +#include "libavutil/intreadwrite.h"
 +#include "libavutil/pixfmt.h"
 +#include "libavutil/atomic.h"
 +
 +#include "avcodec.h"
++#include "h264dec.h"
 +#include "internal.h"
 +#include "mediacodecdec.h"
 +#include "mediacodec_wrapper.h"
 +
 +#define CODEC_MIME "video/avc"
 +
 +typedef struct MediaCodecH264DecContext {
 +
 +    MediaCodecDecContext *ctx;
 +
 +    AVBSFContext *bsf;
 +
 +    AVFifoBuffer *fifo;
 +
 +    AVPacket filtered_pkt;
 +
 +} MediaCodecH264DecContext;
 +
 +static av_cold int mediacodec_decode_close(AVCodecContext *avctx)
 +{
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +
 +    ff_mediacodec_dec_close(avctx, s->ctx);
 +    s->ctx = NULL;
 +
 +    av_fifo_free(s->fifo);
 +
 +    av_bsf_free(&s->bsf);
 +    av_packet_unref(&s->filtered_pkt);
 +
 +    return 0;
 +}
 +
 +static int h264_ps_to_nalu(const uint8_t *src, int src_size, uint8_t **out, int *out_size)
 +{
 +    int i;
 +    int ret = 0;
 +    uint8_t *p = NULL;
 +    static const uint8_t nalu_header[] = { 0x00, 0x00, 0x00, 0x01 };
 +
 +    if (!out || !out_size) {
 +        return AVERROR(EINVAL);
 +    }
 +
 +    p = av_malloc(sizeof(nalu_header) + src_size);
 +    if (!p) {
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    *out = p;
 +    *out_size = sizeof(nalu_header) + src_size;
 +
 +    memcpy(p, nalu_header, sizeof(nalu_header));
 +    memcpy(p + sizeof(nalu_header), src, src_size);
 +
 +    /* Escape 0x00, 0x00, 0x0{0-3} pattern */
 +    for (i = 4; i < *out_size; i++) {
 +        if (i < *out_size - 3 &&
 +            p[i + 0] == 0 &&
 +            p[i + 1] == 0 &&
 +            p[i + 2] <= 3) {
 +            uint8_t *new;
 +
 +            *out_size += 1;
 +            new = av_realloc(*out, *out_size);
 +            if (!new) {
 +                ret = AVERROR(ENOMEM);
 +                goto done;
 +            }
 +            *out = p = new;
 +
 +            i = i + 3;
 +            memmove(p + i, p + i - 1, *out_size - i);
 +            p[i - 1] = 0x03;
 +        }
 +    }
 +done:
 +    if (ret < 0) {
 +        av_freep(out);
 +        *out_size = 0;
 +    }
 +
 +    return ret;
 +}
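
The escaping loop in h264_ps_to_nalu() implements H.264 emulation prevention: whenever the payload would contain two zero bytes followed by a byte <= 0x03, an extra 0x03 byte is inserted so the parameter set can never imitate a start code. A standalone sketch of the same idea with a fixed output buffer (a hypothetical helper, not the commit's in-place version):

#include <stdint.h>
#include <stdio.h>

/* Copy src into dst, inserting 0x03 after every run of two zero bytes that
 * would otherwise be followed by a byte <= 0x03. Returns bytes written,
 * or 0 if dst_cap is too small. */
static size_t escape_rbsp(const uint8_t *src, size_t src_size,
                          uint8_t *dst, size_t dst_cap)
{
    size_t di = 0;
    int zeros = 0;
    for (size_t si = 0; si < src_size; si++) {
        if (zeros >= 2 && src[si] <= 0x03) {
            if (di >= dst_cap) return 0;
            dst[di++] = 0x03;                 /* emulation prevention byte */
            zeros = 0;
        }
        if (di >= dst_cap) return 0;
        dst[di++] = src[si];
        zeros = (src[si] == 0) ? zeros + 1 : 0;
    }
    return di;
}

int main(void)
{
    const uint8_t rbsp[] = { 0x42, 0x00, 0x00, 0x01, 0x7F };
    uint8_t out[16];
    size_t n = escape_rbsp(rbsp, sizeof(rbsp), out, sizeof(out));
    for (size_t i = 0; i < n; i++)
        printf("%02x ", (unsigned)out[i]);    /* 42 00 00 03 01 7f */
    printf("\n");
    return 0;
}
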
 +
 +static av_cold int mediacodec_decode_init(AVCodecContext *avctx)
 +{
 +    int i;
 +    int ret;
 +
 +    H264ParamSets ps;
 +    const PPS *pps = NULL;
 +    const SPS *sps = NULL;
 +    int is_avc = 0;
 +    int nal_length_size = 0;
 +
 +    FFAMediaFormat *format = NULL;
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +
 +    memset(&ps, 0, sizeof(ps));
 +
 +    format = ff_AMediaFormat_new();
 +    if (!format) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to create media format\n");
 +        ret = AVERROR_EXTERNAL;
 +        goto done;
 +    }
 +
 +    ff_AMediaFormat_setString(format, "mime", CODEC_MIME);
 +    ff_AMediaFormat_setInt32(format, "width", avctx->width);
 +    ff_AMediaFormat_setInt32(format, "height", avctx->height);
 +
 +    ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
 +                                   &ps, &is_avc, &nal_length_size, 0, avctx);
 +    if (ret < 0) {
 +        goto done;
 +    }
 +
 +    for (i = 0; i < MAX_PPS_COUNT; i++) {
 +        if (ps.pps_list[i]) {
 +            pps = (const PPS*)ps.pps_list[i]->data;
 +            break;
 +        }
 +    }
 +
 +    if (pps) {
 +        if (ps.sps_list[pps->sps_id]) {
 +            sps = (const SPS*)ps.sps_list[pps->sps_id]->data;
 +        }
 +    }
 +
 +    if (pps && sps) {
 +        uint8_t *data = NULL;
 +        size_t data_size = 0;
 +
 +        if ((ret = h264_ps_to_nalu(sps->data, sps->data_size, &data, &data_size)) < 0) {
 +            goto done;
 +        }
 +        ff_AMediaFormat_setBuffer(format, "csd-0", (void*)data, data_size);
 +        av_freep(&data);
 +
 +        if ((ret = h264_ps_to_nalu(pps->data, pps->data_size, &data, &data_size)) < 0) {
 +            goto done;
 +        }
 +        ff_AMediaFormat_setBuffer(format, "csd-1", (void*)data, data_size);
 +        av_freep(&data);
 +    } else {
 +        av_log(avctx, AV_LOG_ERROR, "Could not extract PPS/SPS from extradata");
 +        ret = AVERROR_INVALIDDATA;
 +        goto done;
 +    }
 +
 +    s->ctx = av_mallocz(sizeof(*s->ctx));
 +    if (!s->ctx) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to allocate MediaCodecDecContext\n");
 +        ret = AVERROR(ENOMEM);
 +        goto done;
 +    }
 +
 +    if ((ret = ff_mediacodec_dec_init(avctx, s->ctx, CODEC_MIME, format)) < 0) {
 +        s->ctx = NULL;
 +        goto done;
 +    }
 +
 +    av_log(avctx, AV_LOG_INFO, "MediaCodec started successfully, ret = %d\n", ret);
 +
 +    s->fifo = av_fifo_alloc(sizeof(AVPacket));
 +    if (!s->fifo) {
 +        ret = AVERROR(ENOMEM);
 +        goto done;
 +    }
 +
 +    const AVBitStreamFilter *bsf = av_bsf_get_by_name("h264_mp4toannexb");
 +    if(!bsf) {
 +        ret = AVERROR_BSF_NOT_FOUND;
 +        goto done;
 +    }
 +
 +    if ((ret = av_bsf_alloc(bsf, &s->bsf))) {
 +        goto done;
 +    }
 +
 +    if (((ret = avcodec_parameters_from_context(s->bsf->par_in, avctx)) < 0) ||
 +        ((ret = av_bsf_init(s->bsf)) < 0)) {
 +          goto done;
 +    }
 +
 +    av_init_packet(&s->filtered_pkt);
 +
 +done:
 +    if (format) {
 +        ff_AMediaFormat_delete(format);
 +    }
 +
 +    if (ret < 0) {
 +        mediacodec_decode_close(avctx);
 +    }
 +
 +    ff_h264_ps_uninit(&ps);
 +
 +    return ret;
 +}
 +
 +
 +static int mediacodec_process_data(AVCodecContext *avctx, AVFrame *frame,
 +                                   int *got_frame, AVPacket *pkt)
 +{
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +
 +    return ff_mediacodec_dec_decode(avctx, s->ctx, frame, got_frame, pkt);
 +}
 +
 +static int mediacodec_decode_frame(AVCodecContext *avctx, void *data,
 +                                   int *got_frame, AVPacket *avpkt)
 +{
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +    AVFrame *frame    = data;
 +    int ret;
 +
 +    /* buffer the input packet */
 +    if (avpkt->size) {
 +        AVPacket input_pkt = { 0 };
 +
 +        if (av_fifo_space(s->fifo) < sizeof(input_pkt)) {
 +            ret = av_fifo_realloc2(s->fifo,
 +                                   av_fifo_size(s->fifo) + sizeof(input_pkt));
 +            if (ret < 0)
 +                return ret;
 +        }
 +
 +        ret = av_packet_ref(&input_pkt, avpkt);
 +        if (ret < 0)
 +            return ret;
 +        av_fifo_generic_write(s->fifo, &input_pkt, sizeof(input_pkt), NULL);
 +    }
 +
 +    /*
 +     * MediaCodec.flush() discards both input and output buffers, thus we
 +     * need to delay the call to this function until the user has released or
 +     * rendered the frames they retain.
 +     *
 +     * After we have buffered an input packet, check if the codec is in the
 +     * flushing state. If it is, we need to call ff_mediacodec_dec_flush.
 +     *
 +     * ff_mediacodec_dec_flush returns 0 if the flush cannot be performed on
 +     * the codec (because the user retains frames). The codec stays in the
 +     * flushing state.
 +     *
 +     * ff_mediacodec_dec_flush returns 1 if the flush can actually be
 +     * performed on the codec. The codec leaves the flushing state and can
 +     * process packets again.
 +     *
 +     * ff_mediacodec_dec_flush returns a negative value if an error has
 +     * occurred.
 +     *
 +     */
 +    if (ff_mediacodec_dec_is_flushing(avctx, s->ctx)) {
 +        if (!ff_mediacodec_dec_flush(avctx, s->ctx)) {
 +            return avpkt->size;
 +        }
 +    }
 +
 +    /* process buffered data */
 +    while (!*got_frame) {
 +        /* prepare the input data -- convert to Annex B if needed */
 +        if (s->filtered_pkt.size <= 0) {
 +            AVPacket input_pkt = { 0 };
 +
 +            av_packet_unref(&s->filtered_pkt);
 +
 +            /* no more data */
 +            if (av_fifo_size(s->fifo) < sizeof(AVPacket)) {
 +                return avpkt->size ? avpkt->size :
 +                    ff_mediacodec_dec_decode(avctx, s->ctx, frame, got_frame, avpkt);
 +            }
 +
 +            av_fifo_generic_read(s->fifo, &input_pkt, sizeof(input_pkt), NULL);
 +
 +            ret = av_bsf_send_packet(s->bsf, &input_pkt);
 +            if (ret < 0) {
 +                return ret;
 +            }
 +
 +            ret = av_bsf_receive_packet(s->bsf, &s->filtered_pkt);
 +            if (ret == AVERROR(EAGAIN)) {
 +                goto done;
 +            }
 +
 +            /* h264_mp4toannexb is used here and does not require flushing */
 +            av_assert0(ret != AVERROR_EOF);
 +
 +            if (ret < 0) {
 +                return ret;
 +            }
 +        }
 +
 +        ret = mediacodec_process_data(avctx, frame, got_frame, &s->filtered_pkt);
 +        if (ret < 0)
 +            return ret;
 +
 +        s->filtered_pkt.size -= ret;
 +        s->filtered_pkt.data += ret;
 +    }
 +done:
 +    return avpkt->size;
 +}
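
The long comment inside mediacodec_decode_frame() describes a small state machine: input packets are always buffered first, and while the codec is in the flushing state no buffered data is fed to it until ff_mediacodec_dec_flush() reports that the flush could actually be performed. A dependency-free sketch of that protocol, with all names hypothetical:

#include <stdio.h>

enum { FLUSH_DEFERRED = 0, FLUSH_DONE = 1 };

static int user_holds_frames = 1;        /* demo assumption: one frame retained */

/* A flush succeeds only once the caller has released every output frame. */
static int try_flush(void)
{
    return user_holds_frames ? FLUSH_DEFERRED : FLUSH_DONE;
}

/* One decode step: while a flush is pending, keep packets buffered. */
static const char *decode_step(int flushing)
{
    if (flushing && try_flush() == FLUSH_DEFERRED)
        return "packet buffered, flush deferred";
    return "flush done, feeding buffered packets";
}

int main(void)
{
    printf("%s\n", decode_step(1));      /* frames still retained */
    user_holds_frames = 0;               /* caller released its frames */
    printf("%s\n", decode_step(1));
    return 0;
}
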
 +
 +static void mediacodec_decode_flush(AVCodecContext *avctx)
 +{
 +    MediaCodecH264DecContext *s = avctx->priv_data;
 +
 +    while (av_fifo_size(s->fifo)) {
 +        AVPacket pkt;
 +        av_fifo_generic_read(s->fifo, &pkt, sizeof(pkt), NULL);
 +        av_packet_unref(&pkt);
 +    }
 +    av_fifo_reset(s->fifo);
 +
 +    av_packet_unref(&s->filtered_pkt);
 +
 +    ff_mediacodec_dec_flush(avctx, s->ctx);
 +}
 +
 +AVCodec ff_h264_mediacodec_decoder = {
 +    .name           = "h264_mediacodec",
 +    .long_name      = NULL_IF_CONFIG_SMALL("H.264 Android MediaCodec decoder"),
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .priv_data_size = sizeof(MediaCodecH264DecContext),
 +    .init           = mediacodec_decode_init,
 +    .decode         = mediacodec_decode_frame,
 +    .flush          = mediacodec_decode_flush,
 +    .close          = mediacodec_decode_close,
 +    .capabilities   = CODEC_CAP_DELAY,
 +    .caps_internal  = FF_CODEC_CAP_SETS_PKT_DTS,
 +};
index 0ef6c74,0000000..6e6127d
mode 100644,000000..100644
--- /dev/null
@@@ -1,47 -1,0 +1,47 @@@
- #include "libavcodec/h264.h"
 +/*
 + * Copyright (c) 2015 Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#ifndef AVCODEC_MIPS_H264CHROMA_MIPS_H
 +#define AVCODEC_MIPS_H264CHROMA_MIPS_H
 +
++#include "libavcodec/h264dec.h"
 +void ff_put_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_put_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_put_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_avg_h264_chroma_mc8_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_avg_h264_chroma_mc4_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +void ff_avg_h264_chroma_mc2_msa(uint8_t *dst, uint8_t *src, int stride,
 +                                int height, int x, int y);
 +
 +void ff_put_h264_chroma_mc8_mmi(uint8_t *dst, uint8_t *src, int stride,
 +        int h, int x, int y);
 +void ff_avg_h264_chroma_mc8_mmi(uint8_t *dst, uint8_t *src, int stride,
 +        int h, int x, int y);
 +void ff_put_h264_chroma_mc4_mmi(uint8_t *dst, uint8_t *src, int stride,
 +        int h, int x, int y);
 +void ff_avg_h264_chroma_mc4_mmi(uint8_t *dst, uint8_t *src, int stride,
 +        int h, int x, int y);
 +
 +#endif /* AVCODEC_MIPS_H264CHROMA_MIPS_H */
index 2fdfd11,0000000..a578457
mode 100644,000000..100644
--- /dev/null
@@@ -1,577 -1,0 +1,577 @@@
- #include "libavcodec/h264.h"
 +/*
 + * Copyright (c) 2015 Parag Salasakar (Parag.Salasakar@imgtec.com)
 +                      Zhou Xiaoyong <zhouxiaoyong@loongson.cn>
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#ifndef AVCODEC_MIPS_H264DSP_MIPS_H
 +#define AVCODEC_MIPS_H264DSP_MIPS_H
 +
++#include "libavcodec/h264dec.h"
 +#include "constants.h"
 +
 +void ff_h264_h_lpf_luma_inter_msa(uint8_t *src, int stride,
 +                                  int alpha, int beta, int8_t *tc0);
 +void ff_h264_v_lpf_luma_inter_msa(uint8_t *src, int stride,
 +                                  int alpha, int beta, int8_t *tc0);
 +void ff_h264_h_lpf_chroma_inter_msa(uint8_t *src, int stride,
 +                                    int alpha, int beta, int8_t *tc0);
 +void ff_h264_v_lpf_chroma_inter_msa(uint8_t *src, int stride,
 +                                    int alpha, int beta, int8_t *tc0);
 +void ff_h264_h_loop_filter_chroma422_msa(uint8_t *src, int32_t stride,
 +                                         int32_t alpha, int32_t beta,
 +                                         int8_t *tc0);
 +void ff_h264_h_loop_filter_chroma422_mbaff_msa(uint8_t *src, int32_t stride,
 +                                               int32_t alpha, int32_t beta,
 +                                               int8_t *tc0);
 +void ff_h264_h_loop_filter_luma_mbaff_msa(uint8_t *src, int32_t stride,
 +                                          int32_t alpha, int32_t beta,
 +                                          int8_t *tc0);
 +
 +void ff_h264_idct_add_msa(uint8_t *dst, int16_t *src, int32_t dst_stride);
 +void ff_h264_idct4x4_addblk_dc_msa(uint8_t *dst, int16_t *src,
 +                                   int32_t dst_stride);
 +void ff_h264_deq_idct_luma_dc_msa(int16_t *dst, int16_t *src,
 +                                  int32_t de_q_val);
 +void ff_h264_idct_add16_msa(uint8_t *dst, const int32_t *blk_offset,
 +                            int16_t *block, int32_t stride,
 +                            const uint8_t nnzc[15 * 8]);
 +void ff_h264_idct_add16_intra_msa(uint8_t *dst, const int32_t *blk_offset,
 +                                  int16_t *block, int32_t dst_stride,
 +                                  const uint8_t nnzc[15 * 8]);
 +void ff_h264_idct_add8_msa(uint8_t **dst, const int32_t *blk_offset,
 +                           int16_t *block, int32_t dst_stride,
 +                           const uint8_t nnzc[15 * 8]);
 +void ff_h264_idct_add8_422_msa(uint8_t **dst, const int32_t *blk_offset,
 +                               int16_t *block, int32_t dst_stride,
 +                               const uint8_t nnzc[15 * 8]);
 +void ff_h264_idct8_addblk_msa(uint8_t *dst, int16_t *src, int32_t dst_stride);
 +void ff_h264_idct8_dc_addblk_msa(uint8_t *dst, int16_t *src,
 +                                 int32_t dst_stride);
 +void ff_h264_idct8_add4_msa(uint8_t *dst, const int *blk_offset,
 +                            int16_t *blk, int dst_stride,
 +                            const uint8_t nnzc[15 * 8]);
 +
 +void ff_h264_h_lpf_luma_intra_msa(uint8_t *src, int stride,
 +                                  int alpha, int beta);
 +void ff_h264_v_lpf_luma_intra_msa(uint8_t *src, int stride,
 +                                  int alpha, int beta);
 +void ff_h264_h_lpf_chroma_intra_msa(uint8_t *src, int stride,
 +                                    int alpha, int beta);
 +void ff_h264_v_lpf_chroma_intra_msa(uint8_t *src, int stride,
 +                                    int alpha, int beta);
 +void ff_h264_h_loop_filter_luma_mbaff_intra_msa(uint8_t *src, int stride,
 +                                                int alpha, int beta);
 +
 +void ff_biweight_h264_pixels16_8_msa(uint8_t *dst, uint8_t *src,
 +                                     int stride, int height, int log2_denom,
 +                                     int weightd, int weights, int offset);
 +void ff_biweight_h264_pixels8_8_msa(uint8_t *dst, uint8_t *src,
 +                                    int stride, int height, int log2_denom,
 +                                    int weightd, int weights, int offset);
 +void ff_biweight_h264_pixels4_8_msa(uint8_t *dst, uint8_t *src,
 +                                    int stride, int height, int log2_denom,
 +                                    int weightd, int weights, int offset);
 +void ff_weight_h264_pixels16_8_msa(uint8_t *src, int stride, int height,
 +                                   int log2_denom, int weight, int offset);
 +void ff_weight_h264_pixels8_8_msa(uint8_t *src, int stride, int height,
 +                                  int log2_denom, int weight, int offset);
 +void ff_weight_h264_pixels4_8_msa(uint8_t *src, int stride, int height,
 +                                  int log2_denom, int weight, int offset);
 +
 +void ff_put_h264_qpel16_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +
 +void ff_put_h264_qpel8_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +
 +void ff_put_h264_qpel4_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel16_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                 ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel8_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel4_mc00_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc10_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc20_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc30_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc01_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc11_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc21_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc31_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc02_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc12_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc22_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc32_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc03_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc13_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc23_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc33_msa(uint8_t *dst, const uint8_t *src,
 +                                ptrdiff_t dst_stride);
 +
 +void ff_h264_intra_predict_plane_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_predict_dc_4blk_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_predict_hor_dc_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_predict_vert_dc_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_predict_mad_cow_dc_l0t_8x8_msa(uint8_t *src,
 +                                                  ptrdiff_t stride);
 +void ff_h264_intra_predict_mad_cow_dc_0lt_8x8_msa(uint8_t *src,
 +                                                  ptrdiff_t stride);
 +void ff_h264_intra_predict_mad_cow_dc_l00_8x8_msa(uint8_t *src,
 +                                                  ptrdiff_t stride);
 +void ff_h264_intra_predict_mad_cow_dc_0l0_8x8_msa(uint8_t *src,
 +                                                  ptrdiff_t stride);
 +void ff_h264_intra_predict_plane_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_vert_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_horiz_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_vert_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_horiz_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_left_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_top_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_128_8x8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_h264_intra_pred_dc_128_16x16_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_vp8_pred8x8_127_dc_8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_vp8_pred8x8_129_dc_8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_vp8_pred16x16_127_dc_8_msa(uint8_t *src, ptrdiff_t stride);
 +void ff_vp8_pred16x16_129_dc_8_msa(uint8_t *src, ptrdiff_t stride);
 +
 +void ff_h264_add_pixels4_8_mmi(uint8_t *_dst, int16_t *_src, int stride);
 +void ff_h264_idct_add_8_mmi(uint8_t *dst, int16_t *block, int stride);
 +void ff_h264_idct8_add_8_mmi(uint8_t *dst, int16_t *block, int stride);
 +void ff_h264_idct_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride);
 +void ff_h264_idct8_dc_add_8_mmi(uint8_t *dst, int16_t *block, int stride);
 +void ff_h264_idct_add16_8_mmi(uint8_t *dst, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_idct_add16intra_8_mmi(uint8_t *dst, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_idct8_add4_8_mmi(uint8_t *dst, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_idct_add8_8_mmi(uint8_t **dest, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_idct_add8_422_8_mmi(uint8_t **dest, const int *block_offset,
 +        int16_t *block, int stride, const uint8_t nnzc[15*8]);
 +void ff_h264_luma_dc_dequant_idct_8_mmi(int16_t *output, int16_t *input,
 +        int qmul);
 +void ff_h264_chroma_dc_dequant_idct_8_mmi(int16_t *block, int qmul);
 +void ff_h264_chroma422_dc_dequant_idct_8_mmi(int16_t *block, int qmul);
 +
 +void ff_h264_weight_pixels16_8_mmi(uint8_t *block, int stride, int height,
 +        int log2_denom, int weight, int offset);
 +void ff_h264_biweight_pixels16_8_mmi(uint8_t *dst, uint8_t *src,
 +        int stride, int height, int log2_denom, int weightd, int weights,
 +        int offset);
 +void ff_h264_weight_pixels8_8_mmi(uint8_t *block, int stride, int height,
 +        int log2_denom, int weight, int offset);
 +void ff_h264_biweight_pixels8_8_mmi(uint8_t *dst, uint8_t *src,
 +        int stride, int height, int log2_denom, int weightd, int weights,
 +        int offset);
 +void ff_h264_weight_pixels4_8_mmi(uint8_t *block, int stride, int height,
 +        int log2_denom, int weight, int offset);
 +void ff_h264_biweight_pixels4_8_mmi(uint8_t *dst, uint8_t *src,
 +        int stride, int height, int log2_denom, int weightd, int weights,
 +        int offset);
 +
 +void ff_deblock_v_chroma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_v_chroma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +void ff_deblock_h_chroma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_h_chroma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +void ff_deblock_v_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_v_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +void ff_deblock_h_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_h_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +void ff_deblock_v8_luma_8_mmi(uint8_t *pix, int stride, int alpha, int beta,
 +        int8_t *tc0);
 +void ff_deblock_v8_luma_intra_8_mmi(uint8_t *pix, int stride, int alpha,
 +        int beta);
 +
 +void ff_put_h264_qpel16_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel16_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_put_h264_qpel8_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel8_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_put_h264_qpel4_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_put_h264_qpel4_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel16_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel16_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel8_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel8_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +void ff_avg_h264_qpel4_mc00_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc10_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc20_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc30_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc01_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc11_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc21_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc31_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc02_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc12_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc22_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc32_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc03_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc13_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc23_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +void ff_avg_h264_qpel4_mc33_mmi(uint8_t *dst, const uint8_t *src,
 +        ptrdiff_t dst_stride);
 +
 +#endif  // #ifndef AVCODEC_MIPS_H264DSP_MIPS_H
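
All of the qpel helpers declared above share FFmpeg's qpel_mc_func signature (dst, src, stride), so each variant slots directly into the H264QpelContext dispatch tables: the first index selects the block size (0 = 16x16, 1 = 8x8, 2 = 4x4) and the second encodes the quarter-pel phase of mcXY as x + 4*y. Below is a minimal sketch of how a per-architecture init could wire a few of the MMI variants in; the function name and the subset of entries are illustrative only, not the actual MIPS init in the tree.

#include "libavutil/attributes.h"
#include "libavcodec/h264qpel.h"
#include "h264dsp_mips.h"

/* Hypothetical wiring example: entry [1][x + 4*y] covers the 8x8 mcXY case. */
av_cold void example_h264qpel_init_mips(H264QpelContext *c, int bit_depth)
{
    if (bit_depth == 8) {
        c->put_h264_qpel_pixels_tab[1][0]  = ff_put_h264_qpel8_mc00_mmi; /* full-pel copy       */
        c->put_h264_qpel_pixels_tab[1][5]  = ff_put_h264_qpel8_mc11_mmi; /* (1,1) quarter-pel   */
        c->avg_h264_qpel_pixels_tab[1][10] = ff_avg_h264_qpel8_mc22_mmi; /* (2,2) half-pel, avg */
    }
}
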
Simple merge
  #include "libavutil/ppc/types_altivec.h"
  #include "libavutil/ppc/util_altivec.h"
  
- #include "libavcodec/h264.h"
+ #include "libavcodec/h264dec.h"
  #include "libavcodec/h264dsp.h"
  
 -#if HAVE_ALTIVEC && HAVE_BIGENDIAN
 +#if HAVE_ALTIVEC
  
  /****************************************************************************
   * IDCT transform:
Simple merge
Simple merge
Simple merge
Simple merge
   * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
   */
  
 +#include <CoreFoundation/CFDictionary.h>
  #include <CoreFoundation/CFNumber.h>
  #include <CoreFoundation/CFData.h>
 -#include <CoreFoundation/CFString.h>
  
 +#include "vda.h"
  #include "libavutil/avutil.h"
- #include "h264.h"
+ #include "h264dec.h"
 -#include "internal.h"
 -#include "vda.h"
 -#include "vda_internal.h"
 -
 -typedef struct VDAContext {
 -    // The current bitstream buffer.
 -    uint8_t             *bitstream;
 -
 -    // The current size of the bitstream.
 -    int                  bitstream_size;
 -
 -    // The reference size used for fast reallocation.
 -    int                  allocated_size;
  
 -    CVImageBufferRef frame;
 -} VDAContext;
 +struct vda_buffer {
 +    CVPixelBufferRef cv_buffer;
 +};
 +#include "internal.h"
 +#include "vda_vt_internal.h"
  
 -/* Decoder callback that adds the VDA frame to the queue in display order. */
 +/* Decoder callback that adds the vda frame to the queue in display order. */
  static void vda_decoder_callback(void *vda_hw_ctx,
                                   CFDictionaryRef user_info,
                                   OSStatus status,
index a196eb7,0000000..92839e2
mode 100644,000000..100644
--- /dev/null
@@@ -1,263 -1,0 +1,263 @@@
- #include "h264.h"
 +/*
 + * Copyright (c) 2012, Xidorn Quan
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +/**
 + * @file
 + * H.264 decoder via VDA
 + * @author Xidorn Quan <quanxunzhen@gmail.com>
 + */
 +
 +#include <string.h>
 +#include <CoreFoundation/CoreFoundation.h>
 +
 +#include "vda.h"
++#include "h264dec.h"
 +#include "avcodec.h"
 +
 +#ifndef kCFCoreFoundationVersionNumber10_7
 +#define kCFCoreFoundationVersionNumber10_7      635.00
 +#endif
 +
 +extern AVCodec ff_h264_decoder, ff_h264_vda_decoder;
 +
 +static const enum AVPixelFormat vda_pixfmts_prior_10_7[] = {
 +    AV_PIX_FMT_UYVY422,
 +    AV_PIX_FMT_YUV420P,
 +    AV_PIX_FMT_NONE
 +};
 +
 +static const enum AVPixelFormat vda_pixfmts[] = {
 +    AV_PIX_FMT_UYVY422,
 +    AV_PIX_FMT_YUYV422,
 +    AV_PIX_FMT_NV12,
 +    AV_PIX_FMT_YUV420P,
 +    AV_PIX_FMT_NONE
 +};
 +
 +typedef struct {
 +    H264Context h264ctx;
 +    int h264_initialized;
 +    struct vda_context vda_ctx;
 +    enum AVPixelFormat pix_fmt;
 +
 +    /* for backing up fields set by the user;
 +     * we have to take full control of such fields here */
 +    void *hwaccel_context;
 +    enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
 +    int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
 +} VDADecoderContext;
 +
 +static enum AVPixelFormat get_format(struct AVCodecContext *avctx,
 +        const enum AVPixelFormat *fmt)
 +{
 +    return AV_PIX_FMT_VDA_VLD;
 +}
 +
 +typedef struct {
 +    CVPixelBufferRef cv_buffer;
 +} VDABufferContext;
 +
 +static void release_buffer(void *opaque, uint8_t *data)
 +{
 +    VDABufferContext *context = opaque;
 +    CVPixelBufferUnlockBaseAddress(context->cv_buffer, 0);
 +    CVPixelBufferRelease(context->cv_buffer);
 +    av_free(context);
 +}
 +
 +static int get_buffer2(AVCodecContext *avctx, AVFrame *pic, int flag)
 +{
 +    VDABufferContext *context = av_mallocz(sizeof(VDABufferContext));
 +    AVBufferRef *buffer = av_buffer_create(NULL, 0, release_buffer, context, 0);
 +    if (!context || !buffer) {
 +        av_free(context);
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    pic->buf[0] = buffer;
 +    pic->data[0] = (void *)1;
 +    return 0;
 +}
 +
 +static inline void set_context(AVCodecContext *avctx)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    ctx->hwaccel_context = avctx->hwaccel_context;
 +    avctx->hwaccel_context = &ctx->vda_ctx;
 +    ctx->get_format = avctx->get_format;
 +    avctx->get_format = get_format;
 +    ctx->get_buffer2 = avctx->get_buffer2;
 +    avctx->get_buffer2 = get_buffer2;
 +}
 +
 +static inline void restore_context(AVCodecContext *avctx)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    avctx->hwaccel_context = ctx->hwaccel_context;
 +    avctx->get_format = ctx->get_format;
 +    avctx->get_buffer2 = ctx->get_buffer2;
 +}
 +
 +static int vdadec_decode(AVCodecContext *avctx,
 +        void *data, int *got_frame, AVPacket *avpkt)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    AVFrame *pic = data;
 +    int ret;
 +
 +    set_context(avctx);
 +    ret = ff_h264_decoder.decode(avctx, data, got_frame, avpkt);
 +    restore_context(avctx);
 +    if (*got_frame) {
 +        AVBufferRef *buffer = pic->buf[0];
 +        VDABufferContext *context = av_buffer_get_opaque(buffer);
 +        CVPixelBufferRef cv_buffer = (CVPixelBufferRef)pic->data[3];
 +
 +        CVPixelBufferRetain(cv_buffer);
 +        CVPixelBufferLockBaseAddress(cv_buffer, 0);
 +        context->cv_buffer = cv_buffer;
 +        pic->format = ctx->pix_fmt;
 +        if (CVPixelBufferIsPlanar(cv_buffer)) {
 +            int i, count = CVPixelBufferGetPlaneCount(cv_buffer);
 +            av_assert0(count < 4);
 +            for (i = 0; i < count; i++) {
 +                pic->data[i] = CVPixelBufferGetBaseAddressOfPlane(cv_buffer, i);
 +                pic->linesize[i] = CVPixelBufferGetBytesPerRowOfPlane(cv_buffer, i);
 +            }
 +        } else {
 +            pic->data[0] = CVPixelBufferGetBaseAddress(cv_buffer);
 +            pic->linesize[0] = CVPixelBufferGetBytesPerRow(cv_buffer);
 +        }
 +    }
 +    avctx->pix_fmt = ctx->pix_fmt;
 +
 +    return ret;
 +}
 +
 +static av_cold int vdadec_close(AVCodecContext *avctx)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    /* release buffers and decoder */
 +    ff_vda_destroy_decoder(&ctx->vda_ctx);
 +    /* close H.264 decoder */
 +    if (ctx->h264_initialized) {
 +        set_context(avctx);
 +        ff_h264_decoder.close(avctx);
 +        restore_context(avctx);
 +    }
 +    return 0;
 +}
 +
 +static av_cold int vdadec_init(AVCodecContext *avctx)
 +{
 +    VDADecoderContext *ctx = avctx->priv_data;
 +    struct vda_context *vda_ctx = &ctx->vda_ctx;
 +    OSStatus status;
 +    int ret, i;
 +
 +    ctx->h264_initialized = 0;
 +
 +    /* init pix_fmts of codec */
 +    if (!ff_h264_vda_decoder.pix_fmts) {
 +        if (kCFCoreFoundationVersionNumber < kCFCoreFoundationVersionNumber10_7)
 +            ff_h264_vda_decoder.pix_fmts = vda_pixfmts_prior_10_7;
 +        else
 +            ff_h264_vda_decoder.pix_fmts = vda_pixfmts;
 +    }
 +
 +    /* init vda */
 +    memset(vda_ctx, 0, sizeof(struct vda_context));
 +    vda_ctx->width = avctx->width;
 +    vda_ctx->height = avctx->height;
 +    vda_ctx->format = 'avc1';
 +    vda_ctx->use_sync_decoding = 1;
 +    vda_ctx->use_ref_buffer = 1;
 +    ctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
 +    switch (ctx->pix_fmt) {
 +    case AV_PIX_FMT_UYVY422:
 +        vda_ctx->cv_pix_fmt_type = '2vuy';
 +        break;
 +    case AV_PIX_FMT_YUYV422:
 +        vda_ctx->cv_pix_fmt_type = 'yuvs';
 +        break;
 +    case AV_PIX_FMT_NV12:
 +        vda_ctx->cv_pix_fmt_type = '420v';
 +        break;
 +    case AV_PIX_FMT_YUV420P:
 +        vda_ctx->cv_pix_fmt_type = 'y420';
 +        break;
 +    default:
 +        av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format: %d\n", ctx->pix_fmt);
 +        goto failed;
 +    }
 +    status = ff_vda_create_decoder(vda_ctx,
 +                                   avctx->extradata, avctx->extradata_size);
 +    if (status != kVDADecoderNoErr) {
 +        av_log(avctx, AV_LOG_ERROR,
 +                "Failed to init VDA decoder: %d.\n", status);
 +        goto failed;
 +    }
 +
 +    /* init H.264 decoder */
 +    set_context(avctx);
 +    ret = ff_h264_decoder.init(avctx);
 +    restore_context(avctx);
 +    if (ret < 0) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to open H.264 decoder.\n");
 +        goto failed;
 +    }
 +    ctx->h264_initialized = 1;
 +
 +    for (i = 0; i < MAX_SPS_COUNT; i++) {
 +        const SPS *sps = ctx->h264ctx.ps.sps_list[i] ?
 +                         (const SPS *)ctx->h264ctx.ps.sps_list[i]->data : NULL;
 +        if (sps && (sps->bit_depth_luma != 8 ||
 +                sps->chroma_format_idc == 2 ||
 +                sps->chroma_format_idc == 3)) {
 +            av_log(avctx, AV_LOG_ERROR, "Format is not supported.\n");
 +            goto failed;
 +        }
 +    }
 +
 +    return 0;
 +
 +failed:
 +    vdadec_close(avctx);
 +    return -1;
 +}
 +
 +static void vdadec_flush(AVCodecContext *avctx)
 +{
 +    set_context(avctx);
 +    ff_h264_decoder.flush(avctx);
 +    restore_context(avctx);
 +}
 +
 +AVCodec ff_h264_vda_decoder = {
 +    .name           = "h264_vda",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .priv_data_size = sizeof(VDADecoderContext),
 +    .init           = vdadec_init,
 +    .close          = vdadec_close,
 +    .decode         = vdadec_decode,
 +    .capabilities   = AV_CODEC_CAP_DELAY,
 +    .flush          = vdadec_flush,
 +    .long_name      = NULL_IF_CONFIG_SMALL("H.264 (VDA acceleration)"),
 +};
  
  #include "avcodec.h"
  #include "internal.h"
- #include "h264.h"
+ #include "h264dec.h"
  #include "vc1.h"
  #include "vdpau.h"
 +#include "vdpau_compat.h"
  #include "vdpau_internal.h"
  
 +// XXX: at the time of adding this ifdefery, av_assert* wasn't used outside.
 +// When dropping it, make sure other av_assert* were not added since then.
 +#if FF_API_BUFS_VDPAU
 +#include "libavutil/avassert.h"
 +#endif
 +
 +#if FF_API_VDPAU
 +#undef NDEBUG
 +#include <assert.h>
 +#endif
 +
  /**
   * @addtogroup VDPAU_Decoding
   *
index 6b4b086,0000000..768acce
mode 100644,000000..100644
--- /dev/null
@@@ -1,48 -1,0 +1,48 @@@
- #include "h264.h"
 +/*
 + * Video Decode and Presentation API for UNIX (VDPAU) is used for
 + * HW decode acceleration for MPEG-1/2, H.264 and VC-1.
 + *
 + * Copyright (C) 2008 NVIDIA
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#ifndef AVCODEC_VDPAU_COMPAT_H
 +#define AVCODEC_VDPAU_COMPAT_H
 +
 +#include <stdint.h>
 +
++#include "h264dec.h"
 +#include "mpeg4video.h"
 +
 +void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf,
 +                             int buf_size);
 +
 +void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
 +                                    int buf_size, int slice_count);
 +
 +void ff_vdpau_h264_picture_start(H264Context *h);
 +void ff_vdpau_h264_set_reference_frames(H264Context *h);
 +void ff_vdpau_h264_picture_complete(H264Context *h);
 +
 +void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
 +                                 int buf_size);
 +
 +void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *s, const uint8_t *buf,
 +                                   int buf_size);
 +
 +#endif /* AVCODEC_VDPAU_COMPAT_H */
Simple merge
index c2c621d,0000000..1288aa5
mode 100644,000000..100644
--- /dev/null
@@@ -1,701 -1,0 +1,701 @@@
- #include "h264.h"
 +/*
 + * Videotoolbox hardware acceleration
 + *
 + * copyright (c) 2012 Sebastien Zwickert
 + *
 + * This file is part of FFmpeg.
 + *
 + * FFmpeg is free software; you can redistribute it and/or
 + * modify it under the terms of the GNU Lesser General Public
 + * License as published by the Free Software Foundation; either
 + * version 2.1 of the License, or (at your option) any later version.
 + *
 + * FFmpeg is distributed in the hope that it will be useful,
 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 + * Lesser General Public License for more details.
 + *
 + * You should have received a copy of the GNU Lesser General Public
 + * License along with FFmpeg; if not, write to the Free Software
 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 + */
 +
 +#include "config.h"
 +#if CONFIG_VIDEOTOOLBOX
 +#  include "videotoolbox.h"
 +#else
 +#  include "vda.h"
 +#endif
 +#include "vda_vt_internal.h"
 +#include "libavutil/avutil.h"
 +#include "bytestream.h"
++#include "h264dec.h"
 +#include "mpegvideo.h"
 +
 +#ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
 +#  define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
 +#endif
 +
 +#define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING  12
 +
 +static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
 +{
 +    CVPixelBufferRef cv_buffer = (CVImageBufferRef)data;
 +    CVPixelBufferRelease(cv_buffer);
 +}
 +
 +static int videotoolbox_buffer_copy(VTContext *vtctx,
 +                                    const uint8_t *buffer,
 +                                    uint32_t size)
 +{
 +    void *tmp;
 +
 +    tmp = av_fast_realloc(vtctx->bitstream,
 +                         &vtctx->allocated_size,
 +                         size);
 +
 +    if (!tmp)
 +        return AVERROR(ENOMEM);
 +
 +    vtctx->bitstream = tmp;
 +    memcpy(vtctx->bitstream, buffer, size);
 +    vtctx->bitstream_size = size;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
 +{
 +    frame->width  = avctx->width;
 +    frame->height = avctx->height;
 +    frame->format = avctx->pix_fmt;
 +    frame->buf[0] = av_buffer_alloc(1);
 +
 +    if (!frame->buf[0])
 +        return AVERROR(ENOMEM);
 +
 +    return 0;
 +}
 +
 +#define AV_W8(p, v) *(p) = (v)
 +
 +CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
 +{
 +    H264Context *h     = avctx->priv_data;
 +    CFDataRef data = NULL;
 +    uint8_t *p;
 +    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
 +    uint8_t *vt_extradata = av_malloc(vt_extradata_size);
 +    if (!vt_extradata)
 +        return NULL;
 +
 +    p = vt_extradata;
 +
 +    AV_W8(p + 0, 1); /* version */
 +    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
 +    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
 +    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
 +    AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
 +    AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
 +    AV_WB16(p + 6, h->ps.sps->data_size);
 +    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
 +    p += 8 + h->ps.sps->data_size;
 +    AV_W8(p + 0, 1); /* number of pps */
 +    AV_WB16(p + 1, h->ps.pps->data_size);
 +    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
 +
 +    p += 3 + h->ps.pps->data_size;
 +    av_assert0(p - vt_extradata == vt_extradata_size);
 +
 +    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
 +    av_free(vt_extradata);
 +    return data;
 +}
 +
 +int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
 +{
 +    av_buffer_unref(&frame->buf[0]);
 +
 +    frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
 +                                     sizeof(vtctx->frame),
 +                                     videotoolbox_buffer_release,
 +                                     NULL,
 +                                     AV_BUFFER_FLAG_READONLY);
 +    if (!frame->buf[0]) {
 +        return AVERROR(ENOMEM);
 +    }
 +
 +    frame->data[3] = (uint8_t*)vtctx->frame;
 +    vtctx->frame = NULL;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
 +                                     const uint8_t *buffer,
 +                                     uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    H264Context *h  = avctx->priv_data;
 +
 +    vtctx->bitstream_size = 0;
 +
 +    if (h->is_avc == 1) {
 +        return videotoolbox_buffer_copy(vtctx, buffer, size);
 +    }
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
 +                                      const uint8_t *buffer,
 +                                      uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    H264Context *h  = avctx->priv_data;
 +    void *tmp;
 +
 +    if (h->is_avc == 1)
 +        return 0;
 +
 +    tmp = av_fast_realloc(vtctx->bitstream,
 +                          &vtctx->allocated_size,
 +                          vtctx->bitstream_size+size+4);
 +    if (!tmp)
 +        return AVERROR(ENOMEM);
 +
 +    vtctx->bitstream = tmp;
 +
 +    AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
 +    memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
 +
 +    vtctx->bitstream_size += size + 4;
 +
 +    return 0;
 +}
 +
 +int ff_videotoolbox_uninit(AVCodecContext *avctx)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +    if (vtctx) {
 +        av_freep(&vtctx->bitstream);
 +        if (vtctx->frame)
 +            CVPixelBufferRelease(vtctx->frame);
 +    }
 +
 +    return 0;
 +}
 +
 +#if CONFIG_VIDEOTOOLBOX
 +static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
 +{
 +    int i;
 +    uint8_t b;
 +
 +    for (i = 3; i >= 0; i--) {
 +        b = (length >> (i * 7)) & 0x7F;
 +        if (i != 0)
 +            b |= 0x80;
 +
 +        bytestream2_put_byteu(pb, b);
 +    }
 +}
 +
 +static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
 +{
 +    CFDataRef data;
 +    uint8_t *rw_extradata;
 +    PutByteContext pb;
 +    int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
 +    // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
 +    int config_size = 13 + 5 + avctx->extradata_size;
 +    int s;
 +
 +    if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
 +        return NULL;
 +
 +    bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
 +    bytestream2_put_byteu(&pb, 0);        // version
 +    bytestream2_put_ne24(&pb, 0);         // flags
 +
 +    // elementary stream descriptor
 +    bytestream2_put_byteu(&pb, 0x03);     // ES_DescrTag
 +    videotoolbox_write_mp4_descr_length(&pb, full_size);
 +    bytestream2_put_ne16(&pb, 0);         // esid
 +    bytestream2_put_byteu(&pb, 0);        // stream priority (0-32)
 +
 +    // decoder configuration descriptor
 +    bytestream2_put_byteu(&pb, 0x04);     // DecoderConfigDescrTag
 +    videotoolbox_write_mp4_descr_length(&pb, config_size);
 +    bytestream2_put_byteu(&pb, 32);       // object type indication. 32 = AV_CODEC_ID_MPEG4
 +    bytestream2_put_byteu(&pb, 0x11);     // stream type
 +    bytestream2_put_ne24(&pb, 0);         // buffer size
 +    bytestream2_put_ne32(&pb, 0);         // max bitrate
 +    bytestream2_put_ne32(&pb, 0);         // avg bitrate
 +
 +    // decoder specific descriptor
 +    bytestream2_put_byteu(&pb, 0x05);     // DecSpecificInfoTag
 +    videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
 +
 +    bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
 +
 +    // SLConfigDescriptor
 +    bytestream2_put_byteu(&pb, 0x06);     // SLConfigDescrTag
 +    bytestream2_put_byteu(&pb, 0x01);     // length
 +    bytestream2_put_byteu(&pb, 0x02);     // predefined: 0x02 (reserved for use in MP4 files)
 +
 +    s = bytestream2_size_p(&pb);
 +
 +    data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
 +
 +    av_freep(&rw_extradata);
 +    return data;
 +}
 +
 +static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
 +                                                           void *buffer,
 +                                                           int size)
 +{
 +    OSStatus status;
 +    CMBlockBufferRef  block_buf;
 +    CMSampleBufferRef sample_buf;
 +
 +    block_buf  = NULL;
 +    sample_buf = NULL;
 +
 +    status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
 +                                                buffer,             // memoryBlock
 +                                                size,               // blockLength
 +                                                kCFAllocatorNull,   // blockAllocator
 +                                                NULL,               // customBlockSource
 +                                                0,                  // offsetToData
 +                                                size,               // dataLength
 +                                                0,                  // flags
 +                                                &block_buf);
 +
 +    if (!status) {
 +        status = CMSampleBufferCreate(kCFAllocatorDefault,  // allocator
 +                                      block_buf,            // dataBuffer
 +                                      TRUE,                 // dataReady
 +                                      0,                    // makeDataReadyCallback
 +                                      0,                    // makeDataReadyRefcon
 +                                      fmt_desc,             // formatDescription
 +                                      1,                    // numSamples
 +                                      0,                    // numSampleTimingEntries
 +                                      NULL,                 // sampleTimingArray
 +                                      0,                    // numSampleSizeEntries
 +                                      NULL,                 // sampleSizeArray
 +                                      &sample_buf);
 +    }
 +
 +    if (block_buf)
 +        CFRelease(block_buf);
 +
 +    return sample_buf;
 +}
 +
 +static void videotoolbox_decoder_callback(void *opaque,
 +                                          void *sourceFrameRefCon,
 +                                          OSStatus status,
 +                                          VTDecodeInfoFlags flags,
 +                                          CVImageBufferRef image_buffer,
 +                                          CMTime pts,
 +                                          CMTime duration)
 +{
 +    AVCodecContext *avctx = opaque;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    if (vtctx->frame) {
 +        CVPixelBufferRelease(vtctx->frame);
 +        vtctx->frame = NULL;
 +    }
 +
 +    if (!image_buffer) {
 +        av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
 +        return;
 +    }
 +
 +    vtctx->frame = CVPixelBufferRetain(image_buffer);
 +}
 +
 +static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
 +{
 +    OSStatus status;
 +    CMSampleBufferRef sample_buf;
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
 +                                                   vtctx->bitstream,
 +                                                   vtctx->bitstream_size);
 +
 +    if (!sample_buf)
 +        return -1;
 +
 +    status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
 +                                               sample_buf,
 +                                               0,       // decodeFlags
 +                                               NULL,    // sourceFrameRefCon
 +                                               0);      // infoFlagsOut
 +    if (status == noErr)
 +        status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
 +
 +    CFRelease(sample_buf);
 +
 +    return status;
 +}
 +
 +static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
 +{
 +    int status;
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    av_buffer_unref(&frame->buf[0]);
 +
 +    if (!videotoolbox->session || !vtctx->bitstream)
 +        return AVERROR_INVALIDDATA;
 +
 +    status = videotoolbox_session_decode_frame(avctx);
 +
 +    if (status) {
 +        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
 +        return AVERROR_UNKNOWN;
 +    }
 +
 +    if (!vtctx->frame)
 +        return AVERROR_UNKNOWN;
 +
 +    return ff_videotoolbox_buffer_create(vtctx, frame);
 +}
 +
 +static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
 +{
 +    H264Context *h = avctx->priv_data;
 +    AVFrame *frame = h->cur_pic_ptr->f;
 +
 +    return videotoolbox_common_end_frame(avctx, frame);
 +}
 +
 +static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
 +                                         const uint8_t *buffer,
 +                                         uint32_t size)
 +{
 +    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 +
 +    return videotoolbox_buffer_copy(vtctx, buffer, size);
 +}
 +
 +static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
 +                                          const uint8_t *buffer,
 +                                          uint32_t size)
 +{
 +    return 0;
 +}
 +
 +static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
 +{
 +    MpegEncContext *s = avctx->priv_data;
 +    AVFrame *frame = s->current_picture_ptr->f;
 +
 +    return videotoolbox_common_end_frame(avctx, frame);
 +}
 +
 +static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
 +                                                          AVCodecContext *avctx)
 +{
 +    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                                   0,
 +                                                                   &kCFTypeDictionaryKeyCallBacks,
 +                                                                   &kCFTypeDictionaryValueCallBacks);
 +
 +    CFDictionarySetValue(config_info,
 +                         kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
 +                         kCFBooleanTrue);
 +
 +    if (avctx->extradata_size) {
 +        CFMutableDictionaryRef avc_info;
 +        CFDataRef data = NULL;
 +
 +        avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                             1,
 +                                             &kCFTypeDictionaryKeyCallBacks,
 +                                             &kCFTypeDictionaryValueCallBacks);
 +
 +        switch (codec_type) {
 +        case kCMVideoCodecType_MPEG4Video :
 +            data = videotoolbox_esds_extradata_create(avctx);
 +            if (data)
 +                CFDictionarySetValue(avc_info, CFSTR("esds"), data);
 +            break;
 +        case kCMVideoCodecType_H264 :
 +            data = ff_videotoolbox_avcc_extradata_create(avctx);
 +            if (data)
 +                CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
 +            break;
 +        default:
 +            break;
 +        }
 +
 +        CFDictionarySetValue(config_info,
 +                kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
 +                avc_info);
 +
 +        if (data)
 +            CFRelease(data);
 +
 +        CFRelease(avc_info);
 +    }
 +    return config_info;
 +}
 +
 +static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
 +                                                             int height,
 +                                                             OSType pix_fmt)
 +{
 +    CFMutableDictionaryRef buffer_attributes;
 +    CFMutableDictionaryRef io_surface_properties;
 +    CFNumberRef cv_pix_fmt;
 +    CFNumberRef w;
 +    CFNumberRef h;
 +
 +    w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
 +    h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
 +    cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
 +
 +    buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                  4,
 +                                                  &kCFTypeDictionaryKeyCallBacks,
 +                                                  &kCFTypeDictionaryValueCallBacks);
 +    io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
 +                                                      0,
 +                                                      &kCFTypeDictionaryKeyCallBacks,
 +                                                      &kCFTypeDictionaryValueCallBacks);
 +
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
 +    CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
 +
 +    CFRelease(io_surface_properties);
 +    CFRelease(cv_pix_fmt);
 +    CFRelease(w);
 +    CFRelease(h);
 +
 +    return buffer_attributes;
 +}
 +
 +static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
 +                                                                   CFDictionaryRef decoder_spec,
 +                                                                   int width,
 +                                                                   int height)
 +{
 +    CMFormatDescriptionRef cm_fmt_desc;
 +    OSStatus status;
 +
 +    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
 +                                            codec_type,
 +                                            width,
 +                                            height,
 +                                            decoder_spec, // Dictionary of extension
 +                                            &cm_fmt_desc);
 +
 +    if (status)
 +        return NULL;
 +
 +    return cm_fmt_desc;
 +}
 +
 +static int videotoolbox_default_init(AVCodecContext *avctx)
 +{
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +    OSStatus status;
 +    VTDecompressionOutputCallbackRecord decoder_cb;
 +    CFDictionaryRef decoder_spec;
 +    CFDictionaryRef buf_attr;
 +
 +    if (!videotoolbox) {
 +        av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
 +        return -1;
 +    }
 +
 +    switch( avctx->codec_id ) {
 +    case AV_CODEC_ID_H263 :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
 +        break;
 +    case AV_CODEC_ID_H264 :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
 +        break;
 +    case AV_CODEC_ID_MPEG1VIDEO :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
 +        break;
 +    case AV_CODEC_ID_MPEG2VIDEO :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
 +        break;
 +    case AV_CODEC_ID_MPEG4 :
 +        videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
 +        break;
 +    default :
 +        break;
 +    }
 +
 +    decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
 +
 +    videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
 +                                                                decoder_spec,
 +                                                                avctx->width,
 +                                                                avctx->height);
 +    if (!videotoolbox->cm_fmt_desc) {
 +        if (decoder_spec)
 +            CFRelease(decoder_spec);
 +
 +        av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
 +        return -1;
 +    }
 +
 +    buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
 +                                                     avctx->height,
 +                                                     videotoolbox->cv_pix_fmt_type);
 +
 +    decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
 +    decoder_cb.decompressionOutputRefCon   = avctx;
 +
 +    status = VTDecompressionSessionCreate(NULL,                      // allocator
 +                                          videotoolbox->cm_fmt_desc, // videoFormatDescription
 +                                          decoder_spec,              // videoDecoderSpecification
 +                                          buf_attr,                  // destinationImageBufferAttributes
 +                                          &decoder_cb,               // outputCallback
 +                                          &videotoolbox->session);   // decompressionSessionOut
 +
 +    if (decoder_spec)
 +        CFRelease(decoder_spec);
 +    if (buf_attr)
 +        CFRelease(buf_attr);
 +
 +    switch (status) {
 +    case kVTVideoDecoderNotAvailableNowErr:
 +    case kVTVideoDecoderUnsupportedDataFormatErr:
 +        return AVERROR(ENOSYS);
 +    case kVTVideoDecoderMalfunctionErr:
 +        return AVERROR(EINVAL);
 +    case kVTVideoDecoderBadDataErr :
 +        return AVERROR_INVALIDDATA;
 +    case 0:
 +        return 0;
 +    default:
 +        return AVERROR_UNKNOWN;
 +    }
 +}
 +
 +static void videotoolbox_default_free(AVCodecContext *avctx)
 +{
 +    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
 +
 +    if (videotoolbox) {
 +        if (videotoolbox->cm_fmt_desc)
 +            CFRelease(videotoolbox->cm_fmt_desc);
 +
 +        if (videotoolbox->session) {
 +            VTDecompressionSessionInvalidate(videotoolbox->session);
 +            CFRelease(videotoolbox->session);
 +        }
 +    }
 +}
 +
 +AVHWAccel ff_h263_videotoolbox_hwaccel = {
 +    .name           = "h263_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H263,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_h264_videotoolbox_hwaccel = {
 +    .name           = "h264_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_H264,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = ff_videotoolbox_h264_start_frame,
 +    .decode_slice   = ff_videotoolbox_h264_decode_slice,
 +    .end_frame      = videotoolbox_h264_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
 +    .name           = "mpeg1_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG1VIDEO,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
 +    .name           = "mpeg2_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG2VIDEO,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
 +    .name           = "mpeg4_videotoolbox",
 +    .type           = AVMEDIA_TYPE_VIDEO,
 +    .id             = AV_CODEC_ID_MPEG4,
 +    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
 +    .alloc_frame    = ff_videotoolbox_alloc_frame,
 +    .start_frame    = videotoolbox_mpeg_start_frame,
 +    .decode_slice   = videotoolbox_mpeg_decode_slice,
 +    .end_frame      = videotoolbox_mpeg_end_frame,
 +    .uninit         = ff_videotoolbox_uninit,
 +    .priv_data_size = sizeof(VTContext),
 +};
 +
 +AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
 +{
 +    AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
 +
 +    if (ret) {
 +        ret->output_callback = videotoolbox_decoder_callback;
 +        ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
 +    }
 +
 +    return ret;
 +}
 +
 +int av_videotoolbox_default_init(AVCodecContext *avctx)
 +{
 +    return av_videotoolbox_default_init2(avctx, NULL);
 +}
 +
 +int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
 +{
 +    avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
 +    if (!avctx->hwaccel_context)
 +        return AVERROR(ENOMEM);
 +    return videotoolbox_default_init(avctx);
 +}
 +
 +void av_videotoolbox_default_free(AVCodecContext *avctx)
 +{
 +
 +    videotoolbox_default_free(avctx);
 +    av_freep(&avctx->hwaccel_context);
 +}
 +#endif /* CONFIG_VIDEOTOOLBOX */
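
For reference, the public entry points defined at the end of this file (av_videotoolbox_alloc_context, av_videotoolbox_default_init/init2 and av_videotoolbox_default_free) are intended to be driven from an application's get_format() callback. A minimal, hypothetical callback sketch follows; the fallback handling is deliberately simplified, and the matching teardown call is av_videotoolbox_default_free(avctx).

#include <libavcodec/avcodec.h>
#include <libavcodec/videotoolbox.h>

/* Hypothetical application callback: prefer VideoToolbox when the decoder
 * offers it, otherwise return the last format in the list (hardware formats
 * are normally listed first, software formats last). */
static enum AVPixelFormat pick_videotoolbox(AVCodecContext *avctx,
                                            const enum AVPixelFormat *fmt)
{
    const enum AVPixelFormat *p;
    enum AVPixelFormat last = fmt[0];

    for (p = fmt; *p != AV_PIX_FMT_NONE; p++) {
        last = *p;
        if (*p == AV_PIX_FMT_VIDEOTOOLBOX &&
            av_videotoolbox_default_init(avctx) >= 0)
            return *p;  /* decoded AVFrames then carry a CVPixelBufferRef in data[3] */
    }
    return last;
}
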
Simple merge
  #include <math.h>
  #include <time.h>
  
 +#include "libavutil/opt.h"
  #include "libavutil/random_seed.h"
 +#include "libavutil/timecode.h"
 +#include "libavutil/avassert.h"
 +#include "libavutil/pixdesc.h"
  #include "libavutil/time_internal.h"
  #include "libavcodec/bytestream.h"
- #include "libavcodec/h264.h"
 +#include "libavcodec/dnxhddata.h"
++#include "libavcodec/h264dec.h"
 +#include "libavcodec/internal.h"
  #include "audiointerleave.h"
  #include "avformat.h"
 +#include "avio_internal.h"
  #include "internal.h"
  #include "mxf.h"
 -
 -static const int NTSC_samples_per_frame[] = { 1602, 1601, 1602, 1601, 1602, 0 };
 -static const int PAL_samples_per_frame[]  = { 1920, 0 };
 +#include "config.h"
  
  extern AVOutputFormat ff_mxf_d10_muxer;
 +extern AVOutputFormat ff_mxf_opatom_muxer;
  
  #define EDIT_UNITS_PER_BODY 250
  #define KAG_SIZE 512