avcodec/videotoolbox: add logging context to log
diff --git a/libavcodec/videotoolbox.c b/libavcodec/videotoolbox.c
index cc1e592..8773de3 100644
  */
 
 #include "config.h"
-#if CONFIG_VIDEOTOOLBOX
-#  include "videotoolbox.h"
-#else
-#  include "vda.h"
-#endif
-#include "vda_vt_internal.h"
+#include "videotoolbox.h"
+#include "libavutil/hwcontext_videotoolbox.h"
+#include "vt_internal.h"
 #include "libavutil/avutil.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/pixdesc.h"
 #include "bytestream.h"
-#include "h264.h"
+#include "decode.h"
+#include "h264dec.h"
+#include "hevcdec.h"
 #include "mpegvideo.h"
+#include <TargetConditionals.h>
 
 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
 #  define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
 #endif
+#ifndef kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder
+#  define kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder CFSTR("EnableHardwareAcceleratedVideoDecoder")
+#endif
+
+#if !HAVE_KCMVIDEOCODECTYPE_HEVC
+enum { kCMVideoCodecType_HEVC = 'hvc1' };
+#endif
 
 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING  12
 
+typedef struct VTHWFrame {
+    CVPixelBufferRef pixbuf;
+    AVBufferRef *hw_frames_ctx;
+} VTHWFrame;
+
 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
 {
-    CVPixelBufferRef cv_buffer = (CVImageBufferRef)data;
-    CVPixelBufferRelease(cv_buffer);
+    VTHWFrame *ref = (VTHWFrame *)data;
+    av_buffer_unref(&ref->hw_frames_ctx);
+    CVPixelBufferRelease(ref->pixbuf);
+
+    av_free(data);
 }
 
 static int videotoolbox_buffer_copy(VTContext *vtctx,
@@ -64,15 +81,59 @@ static int videotoolbox_buffer_copy(VTContext *vtctx,
     return 0;
 }
 
+static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
+{
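+    /* post_process hook installed in ff_videotoolbox_alloc_frame(); it runs
+     * on each decoded frame just before it is returned to the caller. */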
+    VTHWFrame *ref = (VTHWFrame *)frame->buf[0]->data;
+
+    if (!ref->pixbuf) {
+        av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
+        av_frame_unref(frame);
+        return AVERROR_EXTERNAL;
+    }
+
+    frame->crop_right = 0;
+    frame->crop_left = 0;
+    frame->crop_top = 0;
+    frame->crop_bottom = 0;
+
+    frame->data[3] = (uint8_t*)ref->pixbuf;
+
+    if (ref->hw_frames_ctx) {
+        av_buffer_unref(&frame->hw_frames_ctx);
+        frame->hw_frames_ctx = av_buffer_ref(ref->hw_frames_ctx);
+        if (!frame->hw_frames_ctx)
+            return AVERROR(ENOMEM);
+    }
+
+    return 0;
+}
+
 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
 {
+    size_t      size = sizeof(VTHWFrame);
+    uint8_t    *data = NULL;
+    AVBufferRef *buf = NULL;
+    int ret = ff_attach_decode_data(frame);
+    FrameDecodeData *fdd;
+    if (ret < 0)
+        return ret;
+
+    data = av_mallocz(size);
+    if (!data)
+        return AVERROR(ENOMEM);
+    buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
+    if (!buf) {
+        av_freep(&data);
+        return AVERROR(ENOMEM);
+    }
+    frame->buf[0] = buf;
+
+    fdd = (FrameDecodeData*)frame->private_ref->data;
+    fdd->post_process = videotoolbox_postproc_frame;
+
     frame->width  = avctx->width;
     frame->height = avctx->height;
     frame->format = avctx->pix_fmt;
-    frame->buf[0] = av_buffer_alloc(1);
-
-    if (!frame->buf[0])
-        return AVERROR(ENOMEM);
 
     return 0;
 }
@@ -81,10 +142,11 @@ int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
 
 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
 {
-    H264Context *h     = avctx->priv_data;
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+    H264Context *h = avctx->priv_data;
     CFDataRef data = NULL;
     uint8_t *p;
-    int vt_extradata_size = 6 + 3 + h->sps.data_size + 4 + h->pps.data_size;
+    int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
     uint8_t *vt_extradata = av_malloc(vt_extradata_size);
     if (!vt_extradata)
         return NULL;
@@ -92,45 +154,181 @@ CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
     p = vt_extradata;
 
     AV_W8(p + 0, 1); /* version */
-    AV_W8(p + 1, h->sps.data[0]); /* profile */
-    AV_W8(p + 2, h->sps.data[1]); /* profile compat */
-    AV_W8(p + 3, h->sps.data[2]); /* level */
+    AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
+    AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
+    AV_W8(p + 3, h->ps.sps->data[3]); /* level */
     AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
     AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
-    AV_WB16(p + 6, h->sps.data_size + 1);
-    AV_W8(p + 8, NAL_SPS | (3 << 5)); // NAL unit header
-    memcpy(p + 9, h->sps.data, h->sps.data_size);
-    p += 9 + h->sps.data_size;
+    AV_WB16(p + 6, h->ps.sps->data_size);
+    memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
+    p += 8 + h->ps.sps->data_size;
     AV_W8(p + 0, 1); /* number of pps */
-    AV_WB16(p + 1, h->pps.data_size + 1);
-    AV_W8(p + 3, NAL_PPS | (3 << 5)); // NAL unit header
-    memcpy(p + 4, h->pps.data, h->pps.data_size);
+    AV_WB16(p + 1, h->ps.pps->data_size);
+    memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
 
-    p += 4 + h->pps.data_size;
+    p += 3 + h->ps.pps->data_size;
     av_assert0(p - vt_extradata == vt_extradata_size);
 
+    // save sps header (profile/level) used to create decoder session,
+    // so we can detect changes and recreate it.
+    if (vtctx)
+        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
+
     data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
     av_free(vt_extradata);
     return data;
 }
 
-int ff_videotoolbox_buffer_create(VTContext *vtctx, AVFrame *frame)
+CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
 {
-    av_buffer_unref(&frame->buf[0]);
+    HEVCContext *h = avctx->priv_data;
+    int i, num_vps = 0, num_sps = 0, num_pps = 0;
+    const HEVCVPS *vps = h->ps.vps;
+    const HEVCSPS *sps = h->ps.sps;
+    const HEVCPPS *pps = h->ps.pps;
+    PTLCommon ptlc = vps->ptl.general_ptl;
+    VUI vui = sps->vui;
+    uint8_t parallelismType;
+    CFDataRef data = NULL;
+    uint8_t *p;
+    int vt_extradata_size = 23 + 3 + 3 + 3;
+    uint8_t *vt_extradata;
+
+#define COUNT_SIZE_PS(T, t) \
+    for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
+        if (h->ps.t##ps_list[i]) { \
+            const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
+            vt_extradata_size += 2 + lps->data_size; \
+            num_##t##ps++; \
+        } \
+    }
 
-    frame->buf[0] = av_buffer_create((uint8_t*)vtctx->frame,
-                                     sizeof(vtctx->frame),
-                                     videotoolbox_buffer_release,
-                                     NULL,
-                                     AV_BUFFER_FLAG_READONLY);
-    if (!frame->buf[0]) {
-        return AVERROR(ENOMEM);
+    COUNT_SIZE_PS(V, v)
+    COUNT_SIZE_PS(S, s)
+    COUNT_SIZE_PS(P, p)
+
+    vt_extradata = av_malloc(vt_extradata_size);
+    if (!vt_extradata)
+        return NULL;
+    p = vt_extradata;
+
+    /* unsigned int(8) configurationVersion = 1; */
+    AV_W8(p + 0, 1);
+
+    /*
+     * unsigned int(2) general_profile_space;
+     * unsigned int(1) general_tier_flag;
+     * unsigned int(5) general_profile_idc;
+     */
+    AV_W8(p + 1, ptlc.profile_space << 6 |
+                 ptlc.tier_flag     << 5 |
+                 ptlc.profile_idc);
+
+    /* unsigned int(32) general_profile_compatibility_flags; */
+    memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
+
+    /* unsigned int(48) general_constraint_indicator_flags; */
+    AV_W8(p + 6, ptlc.progressive_source_flag    << 7 |
+                 ptlc.interlaced_source_flag     << 6 |
+                 ptlc.non_packed_constraint_flag << 5 |
+                 ptlc.frame_only_constraint_flag << 4);
+    AV_W8(p + 7, 0);
+    AV_WN32(p + 8, 0);
+
+    /* unsigned int(8) general_level_idc; */
+    AV_W8(p + 12, ptlc.level_idc);
+
+    /*
+     * bit(4) reserved = ‘1111’b;
+     * unsigned int(12) min_spatial_segmentation_idc;
+     */
+    AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
+    AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
+
+    /*
+     * bit(6) reserved = ‘111111’b;
+     * unsigned int(2) parallelismType;
+     */
+    if (!vui.min_spatial_segmentation_idc)
+        parallelismType = 0;
+    else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
+        parallelismType = 0;
+    else if (pps->entropy_coding_sync_enabled_flag)
+        parallelismType = 3;
+    else if (pps->tiles_enabled_flag)
+        parallelismType = 2;
+    else
+        parallelismType = 1;
+    AV_W8(p + 15, 0xfc | parallelismType);
+
+    /*
+     * bit(6) reserved = ‘111111’b;
+     * unsigned int(2) chromaFormat;
+     */
+    AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
+
+    /*
+     * bit(5) reserved = ‘11111’b;
+     * unsigned int(3) bitDepthLumaMinus8;
+     */
+    AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
+
+    /*
+     * bit(5) reserved = ‘11111’b;
+     * unsigned int(3) bitDepthChromaMinus8;
+     */
+    AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
+
+    /* bit(16) avgFrameRate; */
+    AV_WB16(p + 19, 0);
+
+    /*
+     * bit(2) constantFrameRate;
+     * bit(3) numTemporalLayers;
+     * bit(1) temporalIdNested;
+     * unsigned int(2) lengthSizeMinusOne;
+     */
+    AV_W8(p + 21, 0                             << 6 |
+                  sps->max_sub_layers           << 3 |
+                  sps->temporal_id_nesting_flag << 2 |
+                  3);
+
+    /* unsigned int(8) numOfArrays; */
+    AV_W8(p + 22, 3);
+
+    p += 23;
+
+#define APPEND_PS(T, t) \
+    /* \
+     * bit(1) array_completeness; \
+     * unsigned int(1) reserved = 0; \
+     * unsigned int(6) NAL_unit_type; \
+     */ \
+    AV_W8(p, 1 << 7 | \
+             HEVC_NAL_##T##PS & 0x3f); \
+    /* unsigned int(16) numNalus; */ \
+    AV_WB16(p + 1, num_##t##ps); \
+    p += 3; \
+    for (i = 0; i < HEVC_MAX_##T##PS_COUNT; i++) { \
+        if (h->ps.t##ps_list[i]) { \
+            const HEVC##T##PS *lps = (const HEVC##T##PS *)h->ps.t##ps_list[i]->data; \
+            /* unsigned int(16) nalUnitLength; */ \
+            AV_WB16(p, lps->data_size); \
+            /* bit(8*nalUnitLength) nalUnit; */ \
+            memcpy(p + 2, lps->data, lps->data_size); \
+            p += 2 + lps->data_size; \
+        } \
     }
 
-    frame->data[3] = (uint8_t*)vtctx->frame;
-    vtctx->frame = NULL;
+    APPEND_PS(V, v)
+    APPEND_PS(S, s)
+    APPEND_PS(P, p)
 
-    return 0;
+    av_assert0(p - vt_extradata == vt_extradata_size);
+
+    data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
+    av_free(vt_extradata);
+    return data;
 }
 
 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
@@ -138,9 +336,7 @@ int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
                                      uint32_t size)
 {
     VTContext *vtctx = avctx->internal->hwaccel_priv_data;
-    H264Context *h  = avctx->priv_data;
-
-    vtctx->bitstream_size = 0;
+    H264Context *h = avctx->priv_data;
 
     if (h->is_avc == 1) {
         return videotoolbox_buffer_copy(vtctx, buffer, size);
@@ -149,16 +345,35 @@ int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
     return 0;
 }
 
-int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
-                                      const uint8_t *buffer,
-                                      uint32_t size)
+static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
+                                           int type,
+                                           const uint8_t *buffer,
+                                           uint32_t size)
 {
     VTContext *vtctx = avctx->internal->hwaccel_priv_data;
-    H264Context *h  = avctx->priv_data;
-    void *tmp;
+    H264Context *h = avctx->priv_data;
 
-    if (h->is_avc == 1)
-        return 0;
+    // save sps header (profile/level) used to create decoder session
+    if (!vtctx->sps[0])
+        memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
+
+    if (type == H264_NAL_SPS) {
+        if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
+            vtctx->reconfig_needed = true;
+            memcpy(vtctx->sps, buffer + 1, 3);
+        }
+    }
+
+    // pass-through SPS/PPS changes to the decoder
+    return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
+}
+
+static int videotoolbox_common_decode_slice(AVCodecContext *avctx,
+                                            const uint8_t *buffer,
+                                            uint32_t size)
+{
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+    void *tmp;
 
     tmp = av_fast_realloc(vtctx->bitstream,
                           &vtctx->allocated_size,
@@ -176,6 +391,18 @@ int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
     return 0;
 }
 
+int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
+                                      const uint8_t *buffer,
+                                      uint32_t size)
+{
+    H264Context *h = avctx->priv_data;
+
+    if (h->is_avc == 1)
+        return 0;
+
+    return videotoolbox_common_decode_slice(avctx, buffer, size);
+}
+
 int ff_videotoolbox_uninit(AVCodecContext *avctx)
 {
     VTContext *vtctx = avctx->internal->hwaccel_priv_data;
@@ -189,6 +416,83 @@ int ff_videotoolbox_uninit(AVCodecContext *avctx)
 }
 
 #if CONFIG_VIDEOTOOLBOX
+// Return the AVVideotoolboxContext that matters currently. Where it comes from
+// depends on the API used.
+static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
+{
+    // Somewhat tricky because the user can call av_videotoolbox_default_free()
+    // at any time, even when the codec is closed.
+    if (avctx->internal && avctx->internal->hwaccel_priv_data) {
+        VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+        if (vtctx->vt_ctx)
+            return vtctx->vt_ctx;
+    }
+    return avctx->hwaccel_context;
+}
+
+static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
+{
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+    CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
+    OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
+    enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
+    int width = CVPixelBufferGetWidth(pixbuf);
+    int height = CVPixelBufferGetHeight(pixbuf);
+    AVHWFramesContext *cached_frames;
+    VTHWFrame *ref;
+    int ret;
+
+    if (!frame->buf[0] || frame->data[3]) {
+        av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
+        av_frame_unref(frame);
+        return AVERROR_EXTERNAL;
+    }
+
+    ref = (VTHWFrame *)frame->buf[0]->data;
+
+    if (ref->pixbuf)
+        CVPixelBufferRelease(ref->pixbuf);
+    ref->pixbuf = vtctx->frame;
+    vtctx->frame = NULL;
+
+    // Old API code path.
+    if (!vtctx->cached_hw_frames_ctx)
+        return 0;
+
+    cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
+
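+    // If the decoder output no longer matches the cached pool (e.g. a
+    // mid-stream resolution or bit-depth change), build a replacement
+    // hw_frames_ctx describing the new parameters.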
+    if (cached_frames->sw_format != sw_format ||
+        cached_frames->width != width ||
+        cached_frames->height != height) {
+        AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
+        AVHWFramesContext *hw_frames;
+        if (!hw_frames_ctx)
+            return AVERROR(ENOMEM);
+
+        hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
+        hw_frames->format = cached_frames->format;
+        hw_frames->sw_format = sw_format;
+        hw_frames->width = width;
+        hw_frames->height = height;
+
+        ret = av_hwframe_ctx_init(hw_frames_ctx);
+        if (ret < 0) {
+            av_buffer_unref(&hw_frames_ctx);
+            return ret;
+        }
+
+        av_buffer_unref(&vtctx->cached_hw_frames_ctx);
+        vtctx->cached_hw_frames_ctx = hw_frames_ctx;
+    }
+
+    av_buffer_unref(&ref->hw_frames_ctx);
+    ref->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
+    if (!ref->hw_frames_ctx)
+        return AVERROR(ENOMEM);
+
+    return 0;
+}
+
 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
 {
     int i;
@@ -313,7 +617,7 @@ static void videotoolbox_decoder_callback(void *opaque,
     }
 
     if (!image_buffer) {
-        av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
+        av_log(avctx, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
         return;
     }
 
@@ -324,7 +628,7 @@ static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
 {
     OSStatus status;
     CMSampleBufferRef sample_buf;
-    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
+    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
     VTContext *vtctx = avctx->internal->hwaccel_priv_data;
 
     sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
@@ -347,106 +651,25 @@ static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
     return status;
 }
 
-static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
-{
-    int status;
-    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
-    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
-
-    if (!videotoolbox->session || !vtctx->bitstream)
-        return AVERROR_INVALIDDATA;
-
-    status = videotoolbox_session_decode_frame(avctx);
-
-    if (status) {
-        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
-        return AVERROR_UNKNOWN;
-    }
-
-    if (!vtctx->frame)
-        return AVERROR_UNKNOWN;
-
-    return ff_videotoolbox_buffer_create(vtctx, frame);
-}
-
-static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
-{
-    H264Context *h = avctx->priv_data;
-    AVFrame *frame = h->cur_pic_ptr->f;
-
-    return videotoolbox_common_end_frame(avctx, frame);
-}
-
-static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
-                                         const uint8_t *buffer,
-                                         uint32_t size)
-{
-    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
-
-    return videotoolbox_buffer_copy(vtctx, buffer, size);
-}
-
-static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
-                                          const uint8_t *buffer,
-                                          uint32_t size)
-{
-    return 0;
-}
-
-static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
-{
-    MpegEncContext *s = avctx->priv_data;
-    AVFrame *frame = s->current_picture_ptr->f;
-
-    return videotoolbox_common_end_frame(avctx, frame);
-}
-
-static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
-                                                          AVCodecContext *avctx)
+static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
+                                                                   CFDictionaryRef decoder_spec,
+                                                                   int width,
+                                                                   int height)
 {
-    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
-                                                                   1,
-                                                                   &kCFTypeDictionaryKeyCallBacks,
-                                                                   &kCFTypeDictionaryValueCallBacks);
-
-    CFDictionarySetValue(config_info,
-                         kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
-                         kCFBooleanTrue);
-
-    if (avctx->extradata_size) {
-        CFMutableDictionaryRef avc_info;
-        CFDataRef data = NULL;
-
-        avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
-                                             1,
-                                             &kCFTypeDictionaryKeyCallBacks,
-                                             &kCFTypeDictionaryValueCallBacks);
-
-        switch (codec_type) {
-        case kCMVideoCodecType_MPEG4Video :
-            data = videotoolbox_esds_extradata_create(avctx);
-            if (data)
-                CFDictionarySetValue(avc_info, CFSTR("esds"), data);
-            break;
-        case kCMVideoCodecType_H264 :
-            data = ff_videotoolbox_avcc_extradata_create(avctx);
-            if (data)
-                CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
-            break;
-        default:
-            break;
-        }
+    CMFormatDescriptionRef cm_fmt_desc;
+    OSStatus status;
 
-        CFDictionarySetValue(config_info,
-                kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
-                avc_info);
+    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
+                                            codec_type,
+                                            width,
+                                            height,
+                                            decoder_spec, // Dictionary of extensions
+                                            &cm_fmt_desc);
 
-        if (data)
-            CFRelease(data);
+    if (status)
+        return NULL;
 
-        CFRelease(avc_info);
-    }
-    return config_info;
+    return cm_fmt_desc;
 }
 
 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
@@ -472,10 +695,16 @@ static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
                                                       &kCFTypeDictionaryKeyCallBacks,
                                                       &kCFTypeDictionaryValueCallBacks);
 
-    CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
+    if (pix_fmt)
+        CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
     CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
     CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
     CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
+#if TARGET_OS_IPHONE
+    CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
+#else
+    CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
+#endif
 
     CFRelease(io_surface_properties);
     CFRelease(cv_pix_fmt);
@@ -485,30 +714,63 @@ static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
     return buffer_attributes;
 }
 
-static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
-                                                                   CFDictionaryRef decoder_spec,
-                                                                   int width,
-                                                                   int height)
+static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
+                                                          AVCodecContext *avctx)
 {
-    CMFormatDescriptionRef cm_fmt_desc;
-    OSStatus status;
+    CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
+                                                                   0,
+                                                                   &kCFTypeDictionaryKeyCallBacks,
+                                                                   &kCFTypeDictionaryValueCallBacks);
 
-    status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
-                                            codec_type,
-                                            width,
-                                            height,
-                                            decoder_spec, // Dictionary of extension
-                                            &cm_fmt_desc);
+    CFDictionarySetValue(config_info,
+                         codec_type == kCMVideoCodecType_HEVC ?
+                            kVTVideoDecoderSpecification_EnableHardwareAcceleratedVideoDecoder :
+                            kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
+                         kCFBooleanTrue);
 
-    if (status)
-        return NULL;
+    CFMutableDictionaryRef avc_info;
+    CFDataRef data = NULL;
 
-    return cm_fmt_desc;
+    avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
+                                         1,
+                                         &kCFTypeDictionaryKeyCallBacks,
+                                         &kCFTypeDictionaryValueCallBacks);
+
+    switch (codec_type) {
+    case kCMVideoCodecType_MPEG4Video :
+        if (avctx->extradata_size)
+            data = videotoolbox_esds_extradata_create(avctx);
+        if (data)
+            CFDictionarySetValue(avc_info, CFSTR("esds"), data);
+        break;
+    case kCMVideoCodecType_H264 :
+        data = ff_videotoolbox_avcc_extradata_create(avctx);
+        if (data)
+            CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
+        break;
+    case kCMVideoCodecType_HEVC :
+        data = ff_videotoolbox_hvcc_extradata_create(avctx);
+        if (data)
+            CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
+        break;
+    default:
+        break;
+    }
+
+    CFDictionarySetValue(config_info,
+            kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
+            avc_info);
+
+    if (data)
+        CFRelease(data);
+
+    CFRelease(avc_info);
+    return config_info;
 }
 
-static int videotoolbox_default_init(AVCodecContext *avctx)
+static int videotoolbox_start(AVCodecContext *avctx)
 {
-    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
+    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
     OSStatus status;
     VTDecompressionOutputCallbackRecord decoder_cb;
     CFDictionaryRef decoder_spec;
@@ -526,6 +788,9 @@ static int videotoolbox_default_init(AVCodecContext *avctx)
     case AV_CODEC_ID_H264 :
         videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
         break;
+    case AV_CODEC_ID_HEVC :
+        videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
+        break;
     case AV_CODEC_ID_MPEG1VIDEO :
         videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
         break;
@@ -541,6 +806,11 @@ static int videotoolbox_default_init(AVCodecContext *avctx)
 
     decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
 
+    if (!decoder_spec) {
+        av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
+        return -1;
+    }
+
     videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
                                                                 decoder_spec,
                                                                 avctx->width,
@@ -574,33 +844,280 @@ static int videotoolbox_default_init(AVCodecContext *avctx)
 
     switch (status) {
     case kVTVideoDecoderNotAvailableNowErr:
+        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
+        return AVERROR(ENOSYS);
     case kVTVideoDecoderUnsupportedDataFormatErr:
+        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
+        return AVERROR(ENOSYS);
+    case kVTCouldNotFindVideoDecoderErr:
+        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder for this format not found.\n");
         return AVERROR(ENOSYS);
     case kVTVideoDecoderMalfunctionErr:
+        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
         return AVERROR(EINVAL);
-    case kVTVideoDecoderBadDataErr :
+    case kVTVideoDecoderBadDataErr:
+        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
         return AVERROR_INVALIDDATA;
     case 0:
         return 0;
     default:
+        av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %d\n", (int)status);
         return AVERROR_UNKNOWN;
     }
 }
 
-static void videotoolbox_default_free(AVCodecContext *avctx)
+static void videotoolbox_stop(AVCodecContext *avctx)
 {
-    AVVideotoolboxContext *videotoolbox = avctx->hwaccel_context;
+    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
+    if (!videotoolbox)
+        return;
 
-    if (videotoolbox) {
-        if (videotoolbox->cm_fmt_desc)
-            CFRelease(videotoolbox->cm_fmt_desc);
+    if (videotoolbox->cm_fmt_desc) {
+        CFRelease(videotoolbox->cm_fmt_desc);
+        videotoolbox->cm_fmt_desc = NULL;
+    }
 
-        if (videotoolbox->session)
-            VTDecompressionSessionInvalidate(videotoolbox->session);
+    if (videotoolbox->session) {
+        VTDecompressionSessionInvalidate(videotoolbox->session);
+        CFRelease(videotoolbox->session);
+        videotoolbox->session = NULL;
     }
 }
 
-AVHWAccel ff_h263_videotoolbox_hwaccel = {
+static const char *videotoolbox_error_string(OSStatus status)
+{
+    switch (status) {
+        case kVTVideoDecoderBadDataErr:
+            return "bad data";
+        case kVTVideoDecoderMalfunctionErr:
+            return "decoder malfunction";
+        case kVTInvalidSessionErr:
+            return "invalid session";
+    }
+    return "unknown";
+}
+
+static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+    OSStatus status;
+    AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+
+    if (vtctx->reconfig_needed == true) {
+        vtctx->reconfig_needed = false;
+        av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
+        videotoolbox_stop(avctx);
+        if (videotoolbox_start(avctx) != 0) {
+            return AVERROR_EXTERNAL;
+        }
+    }
+
+    if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
+        return AVERROR_INVALIDDATA;
+
+    status = videotoolbox_session_decode_frame(avctx);
+    if (status != noErr) {
+        if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
+            vtctx->reconfig_needed = true;
+        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
+        return AVERROR_UNKNOWN;
+    }
+
+    if (!vtctx->frame) {
+        vtctx->reconfig_needed = true;
+        return AVERROR_UNKNOWN;
+    }
+
+    return videotoolbox_buffer_create(avctx, frame);
+}
+
+static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
+{
+    H264Context *h = avctx->priv_data;
+    AVFrame *frame = h->cur_pic_ptr->f;
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+    int ret = videotoolbox_common_end_frame(avctx, frame);
+    vtctx->bitstream_size = 0;
+    return ret;
+}
+
+static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
+                                         const uint8_t *buffer,
+                                         uint32_t size)
+{
+    return 0;
+}
+
+static int videotoolbox_hevc_decode_slice(AVCodecContext *avctx,
+                                          const uint8_t *buffer,
+                                          uint32_t size)
+{
+    return videotoolbox_common_decode_slice(avctx, buffer, size);
+}
+
+
+static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
+                                           int type,
+                                           const uint8_t *buffer,
+                                           uint32_t size)
+{
+    return videotoolbox_common_decode_slice(avctx, buffer, size);
+}
+
+static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
+{
+    HEVCContext *h = avctx->priv_data;
+    AVFrame *frame = h->ref->frame;
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+
+    h->output_frame->crop_right = 0;
+    h->output_frame->crop_left = 0;
+    h->output_frame->crop_top = 0;
+    h->output_frame->crop_bottom = 0;
+
+    int ret = videotoolbox_common_end_frame(avctx, frame);
+    vtctx->bitstream_size = 0;
+    return ret;
+}
+
+static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
+                                         const uint8_t *buffer,
+                                         uint32_t size)
+{
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+
+    return videotoolbox_buffer_copy(vtctx, buffer, size);
+}
+
+static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
+                                          const uint8_t *buffer,
+                                          uint32_t size)
+{
+    return 0;
+}
+
+static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
+{
+    MpegEncContext *s = avctx->priv_data;
+    AVFrame *frame = s->current_picture_ptr->f;
+
+    return videotoolbox_common_end_frame(avctx, frame);
+}
+
+static int videotoolbox_uninit(AVCodecContext *avctx)
+{
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+    if (!vtctx)
+        return 0;
+
+    ff_videotoolbox_uninit(avctx);
+
+    if (vtctx->vt_ctx)
+        videotoolbox_stop(avctx);
+
+    av_buffer_unref(&vtctx->cached_hw_frames_ctx);
+    av_freep(&vtctx->vt_ctx);
+
+    return 0;
+}
+
+static enum AVPixelFormat videotoolbox_best_pixel_format(AVCodecContext *avctx) {
+    const AVPixFmtDescriptor *descriptor = av_pix_fmt_desc_get(avctx->pix_fmt);
+    if (!descriptor)
+        return AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
+
+    int depth = descriptor->comp[0].depth;
+    if (depth > 8) {
+        return AV_PIX_FMT_P010;
+    }
+
+    return AV_PIX_FMT_NV12;
+}
+
+static int videotoolbox_common_init(AVCodecContext *avctx)
+{
+    VTContext *vtctx = avctx->internal->hwaccel_priv_data;
+    AVHWFramesContext *hw_frames;
+    int err;
+
+    // Old API - do nothing.
+    if (avctx->hwaccel_context)
+        return 0;
+
+    if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Either hw_frames_ctx or hw_device_ctx must be set.\n");
+        return AVERROR(EINVAL);
+    }
+
+    vtctx->vt_ctx = av_videotoolbox_alloc_context();
+    if (!vtctx->vt_ctx) {
+        err = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    if (avctx->hw_frames_ctx) {
+        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+    } else {
+        avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
+        if (!avctx->hw_frames_ctx) {
+            err = AVERROR(ENOMEM);
+            goto fail;
+        }
+
+        hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
+        hw_frames->sw_format = videotoolbox_best_pixel_format(avctx);
+        hw_frames->width = avctx->width;
+        hw_frames->height = avctx->height;
+
+        err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
+        if (err < 0) {
+            av_buffer_unref(&avctx->hw_frames_ctx);
+            goto fail;
+        }
+    }
+
+    vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
+    if (!vtctx->cached_hw_frames_ctx) {
+        err = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
+    vtctx->vt_ctx->cv_pix_fmt_type =
+        av_map_videotoolbox_format_from_pixfmt2(hw_frames->sw_format, full_range);
+    if (!vtctx->vt_ctx->cv_pix_fmt_type) {
+        av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
+        err = AVERROR(EINVAL);
+        goto fail;
+    }
+
+    err = videotoolbox_start(avctx);
+    if (err < 0)
+        goto fail;
+
+    return 0;
+
+fail:
+    videotoolbox_uninit(avctx);
+    return err;
+}
+
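+// frame_params hook: lets generic code (avcodec_get_hw_frames_parameters())
+// pre-fill a caller-allocated hw_frames_ctx with the format and dimensions
+// this decoder will produce.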
+static int videotoolbox_frame_params(AVCodecContext *avctx,
+                                     AVBufferRef *hw_frames_ctx)
+{
+    AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
+
+    frames_ctx->format            = AV_PIX_FMT_VIDEOTOOLBOX;
+    frames_ctx->width             = avctx->coded_width;
+    frames_ctx->height            = avctx->coded_height;
+    frames_ctx->sw_format         = videotoolbox_best_pixel_format(avctx);
+
+    return 0;
+}
+
+const AVHWAccel ff_h263_videotoolbox_hwaccel = {
     .name           = "h263_videotoolbox",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H263,
@@ -609,11 +1126,29 @@ AVHWAccel ff_h263_videotoolbox_hwaccel = {
     .start_frame    = videotoolbox_mpeg_start_frame,
     .decode_slice   = videotoolbox_mpeg_decode_slice,
     .end_frame      = videotoolbox_mpeg_end_frame,
-    .uninit         = ff_videotoolbox_uninit,
+    .frame_params   = videotoolbox_frame_params,
+    .init           = videotoolbox_common_init,
+    .uninit         = videotoolbox_uninit,
+    .priv_data_size = sizeof(VTContext),
+};
+
+const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
+    .name           = "hevc_videotoolbox",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_HEVC,
+    .pix_fmt        = AV_PIX_FMT_VIDEOTOOLBOX,
+    .alloc_frame    = ff_videotoolbox_alloc_frame,
+    .start_frame    = videotoolbox_hevc_start_frame,
+    .decode_slice   = videotoolbox_hevc_decode_slice,
+    .decode_params  = videotoolbox_hevc_decode_params,
+    .end_frame      = videotoolbox_hevc_end_frame,
+    .frame_params   = videotoolbox_frame_params,
+    .init           = videotoolbox_common_init,
+    .uninit         = videotoolbox_uninit,
     .priv_data_size = sizeof(VTContext),
 };
 
-AVHWAccel ff_h264_videotoolbox_hwaccel = {
+const AVHWAccel ff_h264_videotoolbox_hwaccel = {
     .name           = "h264_videotoolbox",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H264,
@@ -621,12 +1156,15 @@ AVHWAccel ff_h264_videotoolbox_hwaccel = {
     .alloc_frame    = ff_videotoolbox_alloc_frame,
     .start_frame    = ff_videotoolbox_h264_start_frame,
     .decode_slice   = ff_videotoolbox_h264_decode_slice,
+    .decode_params  = videotoolbox_h264_decode_params,
     .end_frame      = videotoolbox_h264_end_frame,
-    .uninit         = ff_videotoolbox_uninit,
+    .frame_params   = videotoolbox_frame_params,
+    .init           = videotoolbox_common_init,
+    .uninit         = videotoolbox_uninit,
     .priv_data_size = sizeof(VTContext),
 };
 
-AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
+const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
     .name           = "mpeg1_videotoolbox",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MPEG1VIDEO,
@@ -635,11 +1173,13 @@ AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
     .start_frame    = videotoolbox_mpeg_start_frame,
     .decode_slice   = videotoolbox_mpeg_decode_slice,
     .end_frame      = videotoolbox_mpeg_end_frame,
-    .uninit         = ff_videotoolbox_uninit,
+    .frame_params   = videotoolbox_frame_params,
+    .init           = videotoolbox_common_init,
+    .uninit         = videotoolbox_uninit,
     .priv_data_size = sizeof(VTContext),
 };
 
-AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
+const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
     .name           = "mpeg2_videotoolbox",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MPEG2VIDEO,
@@ -648,11 +1188,13 @@ AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
     .start_frame    = videotoolbox_mpeg_start_frame,
     .decode_slice   = videotoolbox_mpeg_decode_slice,
     .end_frame      = videotoolbox_mpeg_end_frame,
-    .uninit         = ff_videotoolbox_uninit,
+    .frame_params   = videotoolbox_frame_params,
+    .init           = videotoolbox_common_init,
+    .uninit         = videotoolbox_uninit,
     .priv_data_size = sizeof(VTContext),
 };
 
-AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
+const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
     .name           = "mpeg4_videotoolbox",
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MPEG4,
@@ -661,22 +1203,35 @@ AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
     .start_frame    = videotoolbox_mpeg_start_frame,
     .decode_slice   = videotoolbox_mpeg_decode_slice,
     .end_frame      = videotoolbox_mpeg_end_frame,
-    .uninit         = ff_videotoolbox_uninit,
+    .frame_params   = videotoolbox_frame_params,
+    .init           = videotoolbox_common_init,
+    .uninit         = videotoolbox_uninit,
     .priv_data_size = sizeof(VTContext),
 };
 
-AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
+static AVVideotoolboxContext *av_videotoolbox_alloc_context_with_pix_fmt(enum AVPixelFormat pix_fmt,
+                                                                         bool full_range)
 {
     AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
 
     if (ret) {
         ret->output_callback = videotoolbox_decoder_callback;
-        ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
+
+        OSType cv_pix_fmt_type = av_map_videotoolbox_format_from_pixfmt2(pix_fmt, full_range);
+        if (cv_pix_fmt_type == 0) {
+            cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
+        }
+        ret->cv_pix_fmt_type = cv_pix_fmt_type;
     }
 
     return ret;
 }
 
+AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
+{
+    return av_videotoolbox_alloc_context_with_pix_fmt(AV_PIX_FMT_NONE, false);
+}
+
 int av_videotoolbox_default_init(AVCodecContext *avctx)
 {
     return av_videotoolbox_default_init2(avctx, NULL);
@@ -684,16 +1239,18 @@ int av_videotoolbox_default_init(AVCodecContext *avctx)
 
 int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
 {
-    avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
+    enum AVPixelFormat pix_fmt = videotoolbox_best_pixel_format(avctx);
+    bool full_range = avctx->color_range == AVCOL_RANGE_JPEG;
+    avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context_with_pix_fmt(pix_fmt, full_range);
     if (!avctx->hwaccel_context)
         return AVERROR(ENOMEM);
-    return videotoolbox_default_init(avctx);
+    return videotoolbox_start(avctx);
 }
 
 void av_videotoolbox_default_free(AVCodecContext *avctx)
 {
 
-    videotoolbox_default_free(avctx);
+    videotoolbox_stop(avctx);
     av_freep(&avctx->hwaccel_context);
 }
 #endif /* CONFIG_VIDEOTOOLBOX */
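
A minimal caller-side sketch of the hw_device_ctx path that videotoolbox_common_init() checks for; the helpers get_vt_format() and open_vt_decoder() are illustrative, not taken from the tree:

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

static enum AVPixelFormat get_vt_format(AVCodecContext *ctx,
                                        const enum AVPixelFormat *pix_fmts)
{
    /* Pick the VideoToolbox hw format so the hwaccel above gets selected. */
    for (const enum AVPixelFormat *p = pix_fmts; *p != AV_PIX_FMT_NONE; p++)
        if (*p == AV_PIX_FMT_VIDEOTOOLBOX)
            return *p;
    return pix_fmts[0];
}

static int open_vt_decoder(AVCodecContext *dec_ctx, const AVCodec *codec)
{
    AVBufferRef *hw_device = NULL;
    int ret = av_hwdevice_ctx_create(&hw_device, AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
                                     NULL, NULL, 0);
    if (ret < 0)
        return ret;

    /* videotoolbox_common_init() wants hw_device_ctx (or hw_frames_ctx) when
     * no legacy hwaccel_context has been installed. */
    dec_ctx->hw_device_ctx = av_buffer_ref(hw_device);
    dec_ctx->get_format    = get_vt_format;
    av_buffer_unref(&hw_device);

    /* Decoded AVFrames come back as AV_PIX_FMT_VIDEOTOOLBOX and carry the
     * CVPixelBufferRef in frame->data[3]. */
    return avcodec_open2(dec_ctx, codec, NULL);
}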