4 * Copyright (C) 2012 - 2013 Guillaume Martres
5 * Copyright (C) 2012 - 2013 Mickael Raulet
6 * Copyright (C) 2012 - 2013 Gildas Cocherel
7 * Copyright (C) 2012 - 2013 Wassim Hamidouche
9 * This file is part of Libav.
11 * Libav is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU Lesser General Public
13 * License as published by the Free Software Foundation; either
14 * version 2.1 of the License, or (at your option) any later version.
16 * Libav is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * Lesser General Public License for more details.
21 * You should have received a copy of the GNU Lesser General Public
22 * License along with Libav; if not, write to the Free Software
23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
26 #include "libavutil/attributes.h"
27 #include "libavutil/common.h"
28 #include "libavutil/display.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/md5.h"
31 #include "libavutil/opt.h"
32 #include "libavutil/pixdesc.h"
33 #include "libavutil/stereo3d.h"
36 #include "bytestream.h"
37 #include "cabac_functions.h"
38 #include "golomb_legacy.h"
40 #include "hevc_data.h"
44 const uint8_t ff_hevc_qpel_extra_before[4] = { 0, 3, 3, 3 };
45 const uint8_t ff_hevc_qpel_extra_after[4] = { 0, 4, 4, 4 };
46 const uint8_t ff_hevc_qpel_extra[4] = { 0, 7, 7, 7 };
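/* The three tables above give, for each quarter-pel phase, the number of extra
 * reference samples luma interpolation needs around the block: phase 0 is the
 * full-pel (copy) case, phases 1-3 use the 8/7-tap filters, which reach
 * 3 samples before and 4 samples after the block (7 extra in total). */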
48 static const uint8_t scan_1x1[1] = { 0 };
50 static const uint8_t horiz_scan2x2_x[4] = { 0, 1, 0, 1 };
52 static const uint8_t horiz_scan2x2_y[4] = { 0, 0, 1, 1 };
54 static const uint8_t horiz_scan4x4_x[16] = {
61 static const uint8_t horiz_scan4x4_y[16] = {
68 static const uint8_t horiz_scan8x8_inv[8][8] = {
69 { 0, 1, 2, 3, 16, 17, 18, 19, },
70 { 4, 5, 6, 7, 20, 21, 22, 23, },
71 { 8, 9, 10, 11, 24, 25, 26, 27, },
72 { 12, 13, 14, 15, 28, 29, 30, 31, },
73 { 32, 33, 34, 35, 48, 49, 50, 51, },
74 { 36, 37, 38, 39, 52, 53, 54, 55, },
75 { 40, 41, 42, 43, 56, 57, 58, 59, },
76 { 44, 45, 46, 47, 60, 61, 62, 63, },
79 static const uint8_t diag_scan2x2_x[4] = { 0, 0, 1, 1 };
81 static const uint8_t diag_scan2x2_y[4] = { 0, 1, 0, 1 };
83 static const uint8_t diag_scan2x2_inv[2][2] = {
88 static const uint8_t diag_scan4x4_inv[4][4] = {
95 static const uint8_t diag_scan8x8_inv[8][8] = {
96 { 0, 2, 5, 9, 14, 20, 27, 35, },
97 { 1, 4, 8, 13, 19, 26, 34, 42, },
98 { 3, 7, 12, 18, 25, 33, 41, 48, },
99 { 6, 11, 17, 24, 32, 40, 47, 53, },
100 { 10, 16, 23, 31, 39, 46, 52, 57, },
101 { 15, 22, 30, 38, 45, 51, 56, 60, },
102 { 21, 29, 37, 44, 50, 55, 59, 62, },
103 { 28, 36, 43, 49, 54, 58, 61, 63, },
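/* The *_inv tables map a coefficient position, indexed as [y][x], back to its
 * rank in the corresponding scan order; e.g. diag_scan8x8_inv[1][0] == 1 means
 * the sample in column 0, row 1 is the second coefficient of the up-right
 * diagonal scan. Scans are hierarchical: coefficients are ordered inside 4x4
 * coefficient groups and the groups themselves follow the same scan pattern. */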
107  * NOTE: Each function hls_foo corresponds to the function foo in the
108 * specification (HLS stands for High Level Syntax).
115 /* free everything allocated by pic_arrays_init() */
116 static void pic_arrays_free(HEVCContext *s)
119 av_freep(&s->deblock);
121 av_freep(&s->skip_flag);
122 av_freep(&s->tab_ct_depth);
124 av_freep(&s->tab_ipm);
125 av_freep(&s->cbf_luma);
126 av_freep(&s->is_pcm);
128 av_freep(&s->qp_y_tab);
129 av_freep(&s->tab_slice_address);
130 av_freep(&s->filter_slice_edges);
132 av_freep(&s->horizontal_bs);
133 av_freep(&s->vertical_bs);
135 av_buffer_pool_uninit(&s->tab_mvf_pool);
136 av_buffer_pool_uninit(&s->rpl_tab_pool);
139 /* allocate arrays that depend on frame dimensions */
140 static int pic_arrays_init(HEVCContext *s, const HEVCSPS *sps)
142 int log2_min_cb_size = sps->log2_min_cb_size;
143 int width = sps->width;
144 int height = sps->height;
145 int pic_size_in_ctb = ((width >> log2_min_cb_size) + 1) *
146 ((height >> log2_min_cb_size) + 1);
147 int ctb_count = sps->ctb_width * sps->ctb_height;
148 int min_pu_size = sps->min_pu_width * sps->min_pu_height;
150 s->bs_width = width >> 3;
151 s->bs_height = height >> 3;
153 s->sao = av_mallocz_array(ctb_count, sizeof(*s->sao));
154 s->deblock = av_mallocz_array(ctb_count, sizeof(*s->deblock));
155 if (!s->sao || !s->deblock)
158 s->skip_flag = av_malloc(pic_size_in_ctb);
159 s->tab_ct_depth = av_malloc(sps->min_cb_height * sps->min_cb_width);
160 if (!s->skip_flag || !s->tab_ct_depth)
163 s->cbf_luma = av_malloc(sps->min_tb_width * sps->min_tb_height);
164 s->tab_ipm = av_mallocz(min_pu_size);
165 s->is_pcm = av_malloc(min_pu_size);
166 if (!s->tab_ipm || !s->cbf_luma || !s->is_pcm)
169 s->filter_slice_edges = av_malloc(ctb_count);
170 s->tab_slice_address = av_malloc(pic_size_in_ctb *
171 sizeof(*s->tab_slice_address));
172 s->qp_y_tab = av_malloc(pic_size_in_ctb *
173 sizeof(*s->qp_y_tab));
174 if (!s->qp_y_tab || !s->filter_slice_edges || !s->tab_slice_address)
177 s->horizontal_bs = av_mallocz(2 * s->bs_width * (s->bs_height + 1));
178 s->vertical_bs = av_mallocz(2 * s->bs_width * (s->bs_height + 1));
179 if (!s->horizontal_bs || !s->vertical_bs)
182 s->tab_mvf_pool = av_buffer_pool_init(min_pu_size * sizeof(MvField),
184 s->rpl_tab_pool = av_buffer_pool_init(ctb_count * sizeof(RefPicListTab),
186 if (!s->tab_mvf_pool || !s->rpl_tab_pool)
193 return AVERROR(ENOMEM);
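/**
 * Parse the weighted-prediction table from the slice header. Per-reference
 * luma and chroma weights are coded as deltas against the default weight
 * 1 << log2_weight_denom, and chroma offsets are reconstructed with the
 * clipped compensation formula seen below, so that absent syntax elements
 * fall back to unweighted prediction (weight = 1 << denom, offset = 0).
 */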
196 static void pred_weight_table(HEVCContext *s, GetBitContext *gb)
200 uint8_t luma_weight_l0_flag[16];
201 uint8_t chroma_weight_l0_flag[16];
202 uint8_t luma_weight_l1_flag[16];
203 uint8_t chroma_weight_l1_flag[16];
205 s->sh.luma_log2_weight_denom = av_clip(get_ue_golomb_long(gb), 0, 7);
206 if (s->ps.sps->chroma_format_idc != 0) {
207 int delta = get_se_golomb(gb);
208 s->sh.chroma_log2_weight_denom = av_clip(s->sh.luma_log2_weight_denom + delta, 0, 7);
211 for (i = 0; i < s->sh.nb_refs[L0]; i++) {
212 luma_weight_l0_flag[i] = get_bits1(gb);
213 if (!luma_weight_l0_flag[i]) {
214 s->sh.luma_weight_l0[i] = 1 << s->sh.luma_log2_weight_denom;
215 s->sh.luma_offset_l0[i] = 0;
218 if (s->ps.sps->chroma_format_idc != 0) { // FIXME: invert "if" and "for"
219 for (i = 0; i < s->sh.nb_refs[L0]; i++)
220 chroma_weight_l0_flag[i] = get_bits1(gb);
222 for (i = 0; i < s->sh.nb_refs[L0]; i++)
223 chroma_weight_l0_flag[i] = 0;
225 for (i = 0; i < s->sh.nb_refs[L0]; i++) {
226 if (luma_weight_l0_flag[i]) {
227 int delta_luma_weight_l0 = get_se_golomb(gb);
228 s->sh.luma_weight_l0[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l0;
229 s->sh.luma_offset_l0[i] = get_se_golomb(gb);
231 if (chroma_weight_l0_flag[i]) {
232 for (j = 0; j < 2; j++) {
233 int delta_chroma_weight_l0 = get_se_golomb(gb);
234 int delta_chroma_offset_l0 = get_se_golomb(gb);
235 s->sh.chroma_weight_l0[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l0;
236 s->sh.chroma_offset_l0[i][j] = av_clip((delta_chroma_offset_l0 - ((128 * s->sh.chroma_weight_l0[i][j])
237 >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
240 s->sh.chroma_weight_l0[i][0] = 1 << s->sh.chroma_log2_weight_denom;
241 s->sh.chroma_offset_l0[i][0] = 0;
242 s->sh.chroma_weight_l0[i][1] = 1 << s->sh.chroma_log2_weight_denom;
243 s->sh.chroma_offset_l0[i][1] = 0;
246 if (s->sh.slice_type == HEVC_SLICE_B) {
247 for (i = 0; i < s->sh.nb_refs[L1]; i++) {
248 luma_weight_l1_flag[i] = get_bits1(gb);
249 if (!luma_weight_l1_flag[i]) {
250 s->sh.luma_weight_l1[i] = 1 << s->sh.luma_log2_weight_denom;
251 s->sh.luma_offset_l1[i] = 0;
254 if (s->ps.sps->chroma_format_idc != 0) {
255 for (i = 0; i < s->sh.nb_refs[L1]; i++)
256 chroma_weight_l1_flag[i] = get_bits1(gb);
258 for (i = 0; i < s->sh.nb_refs[L1]; i++)
259 chroma_weight_l1_flag[i] = 0;
261 for (i = 0; i < s->sh.nb_refs[L1]; i++) {
262 if (luma_weight_l1_flag[i]) {
263 int delta_luma_weight_l1 = get_se_golomb(gb);
264 s->sh.luma_weight_l1[i] = (1 << s->sh.luma_log2_weight_denom) + delta_luma_weight_l1;
265 s->sh.luma_offset_l1[i] = get_se_golomb(gb);
267 if (chroma_weight_l1_flag[i]) {
268 for (j = 0; j < 2; j++) {
269 int delta_chroma_weight_l1 = get_se_golomb(gb);
270 int delta_chroma_offset_l1 = get_se_golomb(gb);
271 s->sh.chroma_weight_l1[i][j] = (1 << s->sh.chroma_log2_weight_denom) + delta_chroma_weight_l1;
272 s->sh.chroma_offset_l1[i][j] = av_clip((delta_chroma_offset_l1 - ((128 * s->sh.chroma_weight_l1[i][j])
273 >> s->sh.chroma_log2_weight_denom) + 128), -128, 127);
276 s->sh.chroma_weight_l1[i][0] = 1 << s->sh.chroma_log2_weight_denom;
277 s->sh.chroma_offset_l1[i][0] = 0;
278 s->sh.chroma_weight_l1[i][1] = 1 << s->sh.chroma_log2_weight_denom;
279 s->sh.chroma_offset_l1[i][1] = 0;
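/**
 * Parse the long-term reference picture set of the current slice: nb_sps
 * entries are selected (via lt_idx_sps) from the candidate list signalled in
 * the SPS, nb_sh entries are coded explicitly in the slice header. Each entry
 * carries POC LSBs and a used_by_curr flag; when delta_poc_msb_present_flag
 * is set, the LSBs are extended to a full POC relative to the current picture.
 */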
285 static int decode_lt_rps(HEVCContext *s, LongTermRPS *rps, GetBitContext *gb)
287 const HEVCSPS *sps = s->ps.sps;
288 int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
289 int prev_delta_msb = 0;
290 unsigned int nb_sps = 0, nb_sh;
294 if (!sps->long_term_ref_pics_present_flag)
297 if (sps->num_long_term_ref_pics_sps > 0)
298 nb_sps = get_ue_golomb_long(gb);
299 nb_sh = get_ue_golomb_long(gb);
301 if (nb_sh + nb_sps > FF_ARRAY_ELEMS(rps->poc))
302 return AVERROR_INVALIDDATA;
304 rps->nb_refs = nb_sh + nb_sps;
306 for (i = 0; i < rps->nb_refs; i++) {
307 uint8_t delta_poc_msb_present;
310 uint8_t lt_idx_sps = 0;
312 if (sps->num_long_term_ref_pics_sps > 1)
313 lt_idx_sps = get_bits(gb, av_ceil_log2(sps->num_long_term_ref_pics_sps));
315 rps->poc[i] = sps->lt_ref_pic_poc_lsb_sps[lt_idx_sps];
316 rps->used[i] = sps->used_by_curr_pic_lt_sps_flag[lt_idx_sps];
318 rps->poc[i] = get_bits(gb, sps->log2_max_poc_lsb);
319 rps->used[i] = get_bits1(gb);
322 delta_poc_msb_present = get_bits1(gb);
323 if (delta_poc_msb_present) {
324 int delta = get_ue_golomb_long(gb);
326 if (i && i != nb_sps)
327 delta += prev_delta_msb;
329 rps->poc[i] += s->poc - delta * max_poc_lsb - s->sh.pic_order_cnt_lsb;
330 prev_delta_msb = delta;
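/**
 * Propagate stream-level parameters from the active SPS/VPS to the
 * AVCodecContext: coded vs. displayed dimensions (output window applied),
 * profile/level, color properties from the VUI, and the frame rate taken from
 * the VPS or VUI timing info. Note that av_reduce() below stores the reduced
 * (num_units_in_tick, time_scale) pair into (den, num), i.e.
 * framerate = time_scale / num_units_in_tick.
 */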
337 static void export_stream_params(AVCodecContext *avctx, const HEVCParamSets *ps,
340 const HEVCVPS *vps = (const HEVCVPS*)ps->vps_list[sps->vps_id]->data;
341 const HEVCWindow *ow = &sps->output_window;
342 unsigned int num = 0, den = 0;
344 avctx->pix_fmt = sps->pix_fmt;
345 avctx->coded_width = sps->width;
346 avctx->coded_height = sps->height;
347 avctx->width = sps->width - ow->left_offset - ow->right_offset;
348 avctx->height = sps->height - ow->top_offset - ow->bottom_offset;
349 avctx->has_b_frames = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics;
350 avctx->profile = sps->ptl.general_ptl.profile_idc;
351 avctx->level = sps->ptl.general_ptl.level_idc;
353 ff_set_sar(avctx, sps->vui.sar);
355 if (sps->vui.video_signal_type_present_flag)
356 avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG
359 avctx->color_range = AVCOL_RANGE_MPEG;
361 if (sps->vui.colour_description_present_flag) {
362 avctx->color_primaries = sps->vui.colour_primaries;
363 avctx->color_trc = sps->vui.transfer_characteristic;
364 avctx->colorspace = sps->vui.matrix_coeffs;
366 avctx->color_primaries = AVCOL_PRI_UNSPECIFIED;
367 avctx->color_trc = AVCOL_TRC_UNSPECIFIED;
368 avctx->colorspace = AVCOL_SPC_UNSPECIFIED;
371 if (vps->vps_timing_info_present_flag) {
372 num = vps->vps_num_units_in_tick;
373 den = vps->vps_time_scale;
374 } else if (sps->vui.vui_timing_info_present_flag) {
375 num = sps->vui.vui_num_units_in_tick;
376 den = sps->vui.vui_time_scale;
379 if (num != 0 && den != 0)
380 av_reduce(&avctx->framerate.den, &avctx->framerate.num,
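/* get_format() below builds the candidate pixel format list handed to
 * ff_get_format(): hardware formats able to handle the stream's chroma format
 * and bit depth come first, followed by the native software format and an
 * AV_PIX_FMT_NONE terminator. */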
384 static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps)
386 #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + CONFIG_HEVC_D3D11VA_HWACCEL + \
387 CONFIG_HEVC_VAAPI_HWACCEL + CONFIG_HEVC_VDPAU_HWACCEL)
388 enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmt = pix_fmts;
390 if (sps->pix_fmt == AV_PIX_FMT_YUV420P || sps->pix_fmt == AV_PIX_FMT_YUVJ420P ||
391 sps->pix_fmt == AV_PIX_FMT_YUV420P10) {
392 #if CONFIG_HEVC_D3D11VA_HWACCEL
393 *fmt++ = AV_PIX_FMT_D3D11VA_VLD;
395 #if CONFIG_HEVC_DXVA2_HWACCEL
396 *fmt++ = AV_PIX_FMT_DXVA2_VLD;
398 #if CONFIG_HEVC_VAAPI_HWACCEL
399 *fmt++ = AV_PIX_FMT_VAAPI;
402 if (sps->pix_fmt == AV_PIX_FMT_YUV420P || sps->pix_fmt == AV_PIX_FMT_YUVJ420P) {
403 #if CONFIG_HEVC_VDPAU_HWACCEL
404 *fmt++ = AV_PIX_FMT_VDPAU;
408 *fmt++ = sps->pix_fmt;
409 *fmt = AV_PIX_FMT_NONE;
411 return ff_get_format(s->avctx, pix_fmts);
414 static int set_sps(HEVCContext *s, const HEVCSPS *sps,
415 enum AVPixelFormat pix_fmt)
426 ret = pic_arrays_init(s, sps);
430 export_stream_params(s->avctx, &s->ps, sps);
432 s->avctx->pix_fmt = pix_fmt;
434 ff_hevc_pred_init(&s->hpc, sps->bit_depth);
435 ff_hevc_dsp_init (&s->hevcdsp, sps->bit_depth);
436 ff_videodsp_init (&s->vdsp, sps->bit_depth);
438 if (sps->sao_enabled && !s->avctx->hwaccel) {
439 av_frame_unref(s->tmp_frame);
440 ret = ff_get_buffer(s->avctx, s->tmp_frame, AV_GET_BUFFER_FLAG_REF);
443 s->frame = s->tmp_frame;
447 s->ps.vps = (HEVCVPS*) s->ps.vps_list[s->ps.sps->vps_id]->data;
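/**
 * Parse the slice segment header: activate the referenced PPS/SPS
 * (reinitializing decoder state on an SPS change), then read the slice
 * address and type, POC LSBs, short- and long-term RPS, reference list sizes
 * and modifications, the weighted-prediction table, QP offsets, deblocking and
 * SAO controls and the entry point offsets, filling s->sh along with the
 * inferred values (slice_qp, slice_ctb_addr_rs).
 */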
457 static int hls_slice_header(HEVCContext *s)
459 GetBitContext *gb = &s->HEVClc.gb;
460 SliceHeader *sh = &s->sh;
464 sh->first_slice_in_pic_flag = get_bits1(gb);
465 if ((IS_IDR(s) || IS_BLA(s)) && sh->first_slice_in_pic_flag) {
466 s->seq_decode = (s->seq_decode + 1) & 0xff;
469 ff_hevc_clear_refs(s);
472 sh->no_output_of_prior_pics_flag = get_bits1(gb);
474 sh->pps_id = get_ue_golomb_long(gb);
475 if (sh->pps_id >= HEVC_MAX_PPS_COUNT || !s->ps.pps_list[sh->pps_id]) {
476 av_log(s->avctx, AV_LOG_ERROR, "PPS id out of range: %d\n", sh->pps_id);
477 return AVERROR_INVALIDDATA;
479 if (!sh->first_slice_in_pic_flag &&
480 s->ps.pps != (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data) {
481 av_log(s->avctx, AV_LOG_ERROR, "PPS changed between slices.\n");
482 return AVERROR_INVALIDDATA;
484 s->ps.pps = (HEVCPPS*)s->ps.pps_list[sh->pps_id]->data;
486 if (s->ps.sps != (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data) {
487 const HEVCSPS *sps = (HEVCSPS*)s->ps.sps_list[s->ps.pps->sps_id]->data;
488 enum AVPixelFormat pix_fmt;
490 ff_hevc_clear_refs(s);
492 pix_fmt = get_format(s, sps);
496 ret = set_sps(s, sps, pix_fmt);
500 s->seq_decode = (s->seq_decode + 1) & 0xff;
504 sh->dependent_slice_segment_flag = 0;
505 if (!sh->first_slice_in_pic_flag) {
506 int slice_address_length;
508 if (s->ps.pps->dependent_slice_segments_enabled_flag)
509 sh->dependent_slice_segment_flag = get_bits1(gb);
511 slice_address_length = av_ceil_log2(s->ps.sps->ctb_width *
512 s->ps.sps->ctb_height);
513 sh->slice_segment_addr = slice_address_length ? get_bits(gb, slice_address_length) : 0;
514 if (sh->slice_segment_addr >= s->ps.sps->ctb_width * s->ps.sps->ctb_height) {
515 av_log(s->avctx, AV_LOG_ERROR,
516 "Invalid slice segment address: %u.\n",
517 sh->slice_segment_addr);
518 return AVERROR_INVALIDDATA;
521 if (!sh->dependent_slice_segment_flag) {
522 sh->slice_addr = sh->slice_segment_addr;
526 sh->slice_segment_addr = sh->slice_addr = 0;
528 s->slice_initialized = 0;
531 if (!sh->dependent_slice_segment_flag) {
532 s->slice_initialized = 0;
534 for (i = 0; i < s->ps.pps->num_extra_slice_header_bits; i++)
535 skip_bits(gb, 1); // slice_reserved_undetermined_flag[]
537 sh->slice_type = get_ue_golomb_long(gb);
538 if (!(sh->slice_type == HEVC_SLICE_I ||
539 sh->slice_type == HEVC_SLICE_P ||
540 sh->slice_type == HEVC_SLICE_B)) {
541 av_log(s->avctx, AV_LOG_ERROR, "Unknown slice type: %d.\n",
543 return AVERROR_INVALIDDATA;
545 if (IS_IRAP(s) && sh->slice_type != HEVC_SLICE_I) {
546 av_log(s->avctx, AV_LOG_ERROR, "Inter slices in an IRAP frame.\n");
547 return AVERROR_INVALIDDATA;
550     // when the flag is not present, the picture is inferred to be output
551 sh->pic_output_flag = 1;
552 if (s->ps.pps->output_flag_present_flag)
553 sh->pic_output_flag = get_bits1(gb);
555 if (s->ps.sps->separate_colour_plane_flag)
556 sh->colour_plane_id = get_bits(gb, 2);
561 sh->pic_order_cnt_lsb = get_bits(gb, s->ps.sps->log2_max_poc_lsb);
562 poc = ff_hevc_compute_poc(s, sh->pic_order_cnt_lsb);
563 if (!sh->first_slice_in_pic_flag && poc != s->poc) {
564 av_log(s->avctx, AV_LOG_WARNING,
565 "Ignoring POC change between slices: %d -> %d\n", s->poc, poc);
566 if (s->avctx->err_recognition & AV_EF_EXPLODE)
567 return AVERROR_INVALIDDATA;
572 sh->short_term_ref_pic_set_sps_flag = get_bits1(gb);
573 pos = get_bits_left(gb);
574 if (!sh->short_term_ref_pic_set_sps_flag) {
575 ret = ff_hevc_decode_short_term_rps(gb, s->avctx, &sh->slice_rps, s->ps.sps, 1);
579 sh->short_term_rps = &sh->slice_rps;
581 int numbits, rps_idx;
583 if (!s->ps.sps->nb_st_rps) {
584 av_log(s->avctx, AV_LOG_ERROR, "No ref lists in the SPS.\n");
585 return AVERROR_INVALIDDATA;
588 numbits = av_ceil_log2(s->ps.sps->nb_st_rps);
589 rps_idx = numbits > 0 ? get_bits(gb, numbits) : 0;
590 sh->short_term_rps = &s->ps.sps->st_rps[rps_idx];
592 sh->short_term_ref_pic_set_size = pos - get_bits_left(gb);
594 pos = get_bits_left(gb);
595 ret = decode_lt_rps(s, &sh->long_term_rps, gb);
597 av_log(s->avctx, AV_LOG_WARNING, "Invalid long term RPS.\n");
598 if (s->avctx->err_recognition & AV_EF_EXPLODE)
599 return AVERROR_INVALIDDATA;
601 sh->long_term_ref_pic_set_size = pos - get_bits_left(gb);
603 if (s->ps.sps->sps_temporal_mvp_enabled_flag)
604 sh->slice_temporal_mvp_enabled_flag = get_bits1(gb);
606 sh->slice_temporal_mvp_enabled_flag = 0;
608 s->sh.short_term_rps = NULL;
613 if (s->temporal_id == 0 &&
614 s->nal_unit_type != HEVC_NAL_TRAIL_N &&
615 s->nal_unit_type != HEVC_NAL_TSA_N &&
616 s->nal_unit_type != HEVC_NAL_STSA_N &&
617 s->nal_unit_type != HEVC_NAL_RADL_N &&
618 s->nal_unit_type != HEVC_NAL_RADL_R &&
619 s->nal_unit_type != HEVC_NAL_RASL_N &&
620 s->nal_unit_type != HEVC_NAL_RASL_R)
623 if (s->ps.sps->sao_enabled) {
624 sh->slice_sample_adaptive_offset_flag[0] = get_bits1(gb);
625 sh->slice_sample_adaptive_offset_flag[1] =
626 sh->slice_sample_adaptive_offset_flag[2] = get_bits1(gb);
628 sh->slice_sample_adaptive_offset_flag[0] = 0;
629 sh->slice_sample_adaptive_offset_flag[1] = 0;
630 sh->slice_sample_adaptive_offset_flag[2] = 0;
633 sh->nb_refs[L0] = sh->nb_refs[L1] = 0;
634 if (sh->slice_type == HEVC_SLICE_P || sh->slice_type == HEVC_SLICE_B) {
637 sh->nb_refs[L0] = s->ps.pps->num_ref_idx_l0_default_active;
638 if (sh->slice_type == HEVC_SLICE_B)
639 sh->nb_refs[L1] = s->ps.pps->num_ref_idx_l1_default_active;
641 if (get_bits1(gb)) { // num_ref_idx_active_override_flag
642 sh->nb_refs[L0] = get_ue_golomb_long(gb) + 1;
643 if (sh->slice_type == HEVC_SLICE_B)
644 sh->nb_refs[L1] = get_ue_golomb_long(gb) + 1;
646 if (sh->nb_refs[L0] > HEVC_MAX_REFS || sh->nb_refs[L1] > HEVC_MAX_REFS) {
647 av_log(s->avctx, AV_LOG_ERROR, "Too many refs: %d/%d.\n",
648 sh->nb_refs[L0], sh->nb_refs[L1]);
649 return AVERROR_INVALIDDATA;
652 sh->rpl_modification_flag[0] = 0;
653 sh->rpl_modification_flag[1] = 0;
654 nb_refs = ff_hevc_frame_nb_refs(s);
656 av_log(s->avctx, AV_LOG_ERROR, "Zero refs for a frame with P or B slices.\n");
657 return AVERROR_INVALIDDATA;
660 if (s->ps.pps->lists_modification_present_flag && nb_refs > 1) {
661 sh->rpl_modification_flag[0] = get_bits1(gb);
662 if (sh->rpl_modification_flag[0]) {
663 for (i = 0; i < sh->nb_refs[L0]; i++)
664 sh->list_entry_lx[0][i] = get_bits(gb, av_ceil_log2(nb_refs));
667 if (sh->slice_type == HEVC_SLICE_B) {
668 sh->rpl_modification_flag[1] = get_bits1(gb);
669 if (sh->rpl_modification_flag[1] == 1)
670 for (i = 0; i < sh->nb_refs[L1]; i++)
671 sh->list_entry_lx[1][i] = get_bits(gb, av_ceil_log2(nb_refs));
675 if (sh->slice_type == HEVC_SLICE_B)
676 sh->mvd_l1_zero_flag = get_bits1(gb);
678 if (s->ps.pps->cabac_init_present_flag)
679 sh->cabac_init_flag = get_bits1(gb);
681 sh->cabac_init_flag = 0;
683 sh->collocated_ref_idx = 0;
684 if (sh->slice_temporal_mvp_enabled_flag) {
685 sh->collocated_list = L0;
686 if (sh->slice_type == HEVC_SLICE_B)
687 sh->collocated_list = !get_bits1(gb);
689 if (sh->nb_refs[sh->collocated_list] > 1) {
690 sh->collocated_ref_idx = get_ue_golomb_long(gb);
691 if (sh->collocated_ref_idx >= sh->nb_refs[sh->collocated_list]) {
692 av_log(s->avctx, AV_LOG_ERROR,
693 "Invalid collocated_ref_idx: %d.\n",
694 sh->collocated_ref_idx);
695 return AVERROR_INVALIDDATA;
700 if ((s->ps.pps->weighted_pred_flag && sh->slice_type == HEVC_SLICE_P) ||
701 (s->ps.pps->weighted_bipred_flag && sh->slice_type == HEVC_SLICE_B)) {
702 pred_weight_table(s, gb);
705 sh->max_num_merge_cand = 5 - get_ue_golomb_long(gb);
706 if (sh->max_num_merge_cand < 1 || sh->max_num_merge_cand > 5) {
707 av_log(s->avctx, AV_LOG_ERROR,
708 "Invalid number of merging MVP candidates: %d.\n",
709 sh->max_num_merge_cand);
710 return AVERROR_INVALIDDATA;
714 sh->slice_qp_delta = get_se_golomb(gb);
716 if (s->ps.pps->pic_slice_level_chroma_qp_offsets_present_flag) {
717 sh->slice_cb_qp_offset = get_se_golomb(gb);
718 sh->slice_cr_qp_offset = get_se_golomb(gb);
720 sh->slice_cb_qp_offset = 0;
721 sh->slice_cr_qp_offset = 0;
724 if (s->ps.pps->deblocking_filter_control_present_flag) {
725 int deblocking_filter_override_flag = 0;
727 if (s->ps.pps->deblocking_filter_override_enabled_flag)
728 deblocking_filter_override_flag = get_bits1(gb);
730 if (deblocking_filter_override_flag) {
731 sh->disable_deblocking_filter_flag = get_bits1(gb);
732 if (!sh->disable_deblocking_filter_flag) {
733 sh->beta_offset = get_se_golomb(gb) * 2;
734 sh->tc_offset = get_se_golomb(gb) * 2;
737 sh->disable_deblocking_filter_flag = s->ps.pps->disable_dbf;
738 sh->beta_offset = s->ps.pps->beta_offset;
739 sh->tc_offset = s->ps.pps->tc_offset;
742 sh->disable_deblocking_filter_flag = 0;
747 if (s->ps.pps->seq_loop_filter_across_slices_enabled_flag &&
748 (sh->slice_sample_adaptive_offset_flag[0] ||
749 sh->slice_sample_adaptive_offset_flag[1] ||
750 !sh->disable_deblocking_filter_flag)) {
751 sh->slice_loop_filter_across_slices_enabled_flag = get_bits1(gb);
753 sh->slice_loop_filter_across_slices_enabled_flag = s->ps.pps->seq_loop_filter_across_slices_enabled_flag;
755 } else if (!s->slice_initialized) {
756 av_log(s->avctx, AV_LOG_ERROR, "Independent slice segment missing.\n");
757 return AVERROR_INVALIDDATA;
760 sh->num_entry_point_offsets = 0;
761 if (s->ps.pps->tiles_enabled_flag || s->ps.pps->entropy_coding_sync_enabled_flag) {
762 sh->num_entry_point_offsets = get_ue_golomb_long(gb);
763 if (sh->num_entry_point_offsets > 0) {
764 int offset_len = get_ue_golomb_long(gb) + 1;
766 for (i = 0; i < sh->num_entry_point_offsets; i++)
767 skip_bits(gb, offset_len);
771 if (s->ps.pps->slice_header_extension_present_flag) {
772 unsigned int length = get_ue_golomb_long(gb);
773 for (i = 0; i < length; i++)
774 skip_bits(gb, 8); // slice_header_extension_data_byte
777 // Inferred parameters
778 sh->slice_qp = 26 + s->ps.pps->pic_init_qp_minus26 + sh->slice_qp_delta;
779 if (sh->slice_qp > 51 ||
780 sh->slice_qp < -s->ps.sps->qp_bd_offset) {
781 av_log(s->avctx, AV_LOG_ERROR,
782 "The slice_qp %d is outside the valid range "
785 -s->ps.sps->qp_bd_offset);
786 return AVERROR_INVALIDDATA;
789 sh->slice_ctb_addr_rs = sh->slice_segment_addr;
791 if (!s->sh.slice_ctb_addr_rs && s->sh.dependent_slice_segment_flag) {
792 av_log(s->avctx, AV_LOG_ERROR, "Impossible slice segment.\n");
793 return AVERROR_INVALIDDATA;
796 s->HEVClc.first_qp_group = !s->sh.dependent_slice_segment_flag;
798 if (!s->ps.pps->cu_qp_delta_enabled_flag)
799 s->HEVClc.qp_y = FFUMOD(s->sh.slice_qp + 52 + 2 * s->ps.sps->qp_bd_offset,
800 52 + s->ps.sps->qp_bd_offset) - s->ps.sps->qp_bd_offset;
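/* The modular expression above mirrors the QpY predictor derivation of the
 * spec for the first quantization group; since slice_qp was already validated
 * to [-qp_bd_offset, 51], it reduces to qp_y = slice_qp (e.g. 8-bit content,
 * slice_qp 30 -> qp_y 30). */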
802 s->slice_initialized = 1;
807 #define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])
809 #define SET_SAO(elem, value) \
811 if (!sao_merge_up_flag && !sao_merge_left_flag) \
813 else if (sao_merge_left_flag) \
814 sao->elem = CTB(s->sao, rx-1, ry).elem; \
815 else if (sao_merge_up_flag) \
816 sao->elem = CTB(s->sao, rx, ry-1).elem; \
821 static void hls_sao_param(HEVCContext *s, int rx, int ry)
823 HEVCLocalContext *lc = &s->HEVClc;
824 int sao_merge_left_flag = 0;
825 int sao_merge_up_flag = 0;
826 int shift = s->ps.sps->bit_depth - FFMIN(s->ps.sps->bit_depth, 10);
827 SAOParams *sao = &CTB(s->sao, rx, ry);
830 if (s->sh.slice_sample_adaptive_offset_flag[0] ||
831 s->sh.slice_sample_adaptive_offset_flag[1]) {
833 if (lc->ctb_left_flag)
834 sao_merge_left_flag = ff_hevc_sao_merge_flag_decode(s);
836 if (ry > 0 && !sao_merge_left_flag) {
838 sao_merge_up_flag = ff_hevc_sao_merge_flag_decode(s);
842 for (c_idx = 0; c_idx < 3; c_idx++) {
843 if (!s->sh.slice_sample_adaptive_offset_flag[c_idx]) {
844 sao->type_idx[c_idx] = SAO_NOT_APPLIED;
849 sao->type_idx[2] = sao->type_idx[1];
850 sao->eo_class[2] = sao->eo_class[1];
852 SET_SAO(type_idx[c_idx], ff_hevc_sao_type_idx_decode(s));
855 if (sao->type_idx[c_idx] == SAO_NOT_APPLIED)
858 for (i = 0; i < 4; i++)
859 SET_SAO(offset_abs[c_idx][i], ff_hevc_sao_offset_abs_decode(s));
861 if (sao->type_idx[c_idx] == SAO_BAND) {
862 for (i = 0; i < 4; i++) {
863 if (sao->offset_abs[c_idx][i]) {
864 SET_SAO(offset_sign[c_idx][i],
865 ff_hevc_sao_offset_sign_decode(s));
867 sao->offset_sign[c_idx][i] = 0;
870 SET_SAO(band_position[c_idx], ff_hevc_sao_band_position_decode(s));
871 } else if (c_idx != 2) {
872 SET_SAO(eo_class[c_idx], ff_hevc_sao_eo_class_decode(s));
875 // Inferred parameters
876 sao->offset_val[c_idx][0] = 0;
877 for (i = 0; i < 4; i++) {
878 sao->offset_val[c_idx][i + 1] = sao->offset_abs[c_idx][i] << shift;
879 if (sao->type_idx[c_idx] == SAO_EDGE) {
881 sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
882 } else if (sao->offset_sign[c_idx][i]) {
883 sao->offset_val[c_idx][i + 1] = -sao->offset_val[c_idx][i + 1];
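/**
 * Decode and reconstruct the residual of one transform block: derive the
 * dequantization scale from the QP (optionally with scaling matrices), parse
 * transform_skip_flag and the last significant coefficient position, then walk
 * the 4x4 coefficient groups in reverse scan order decoding the significance
 * map, greater-than-1/greater-than-2 flags, signs (with optional sign data
 * hiding) and Golomb-Rice coded remaining levels; finally apply the inverse
 * transform (DST for 4x4 intra luma) and add the residual to the prediction.
 */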
892 static void hls_residual_coding(HEVCContext *s, int x0, int y0,
893 int log2_trafo_size, enum ScanType scan_idx,
896 #define GET_COORD(offset, n) \
898 x_c = (scan_x_cg[offset >> 4] << 2) + scan_x_off[n]; \
899 y_c = (scan_y_cg[offset >> 4] << 2) + scan_y_off[n]; \
901 HEVCLocalContext *lc = &s->HEVClc;
902 int transform_skip_flag = 0;
904 int last_significant_coeff_x, last_significant_coeff_y;
908 int greater1_ctx = 1;
911 int x_cg_last_sig, y_cg_last_sig;
913 const uint8_t *scan_x_cg, *scan_y_cg, *scan_x_off, *scan_y_off;
915 ptrdiff_t stride = s->frame->linesize[c_idx];
916 int hshift = s->ps.sps->hshift[c_idx];
917 int vshift = s->ps.sps->vshift[c_idx];
918 uint8_t *dst = &s->frame->data[c_idx][(y0 >> vshift) * stride +
919 ((x0 >> hshift) << s->ps.sps->pixel_shift)];
920 LOCAL_ALIGNED_32(int16_t, coeffs, [MAX_TB_SIZE * MAX_TB_SIZE]);
921 LOCAL_ALIGNED_8(uint8_t, significant_coeff_group_flag, [8], [8]);
923 int trafo_size = 1 << log2_trafo_size;
924 int i, qp, shift, add, scale, scale_m;
925 static const uint8_t level_scale[] = { 40, 45, 51, 57, 64, 72 };
926 const uint8_t *scale_matrix;
929 memset(coeffs, 0, sizeof(int16_t) * MAX_TB_SIZE * MAX_TB_SIZE);
930 memset(significant_coeff_group_flag, 0, sizeof(uint8_t) * 8 * 8);
931 // Derive QP for dequant
932 if (!lc->cu.cu_transquant_bypass_flag) {
933 static const int qp_c[] = {
934 29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37
937 static const uint8_t rem6[51 + 2 * 6 + 1] = {
938 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2,
939 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5,
940 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3,
943 static const uint8_t div6[51 + 2 * 6 + 1] = {
944 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3,
945 3, 3, 3, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6,
946 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10,
951 qp = qp_y + s->ps.sps->qp_bd_offset;
956 offset = s->ps.pps->cb_qp_offset + s->sh.slice_cb_qp_offset;
958 offset = s->ps.pps->cr_qp_offset + s->sh.slice_cr_qp_offset;
960 qp_i = av_clip(qp_y + offset, -s->ps.sps->qp_bd_offset, 57);
966 qp = qp_c[qp_i - 30];
968 qp += s->ps.sps->qp_bd_offset;
971 shift = s->ps.sps->bit_depth + log2_trafo_size - 5;
972 add = 1 << (shift - 1);
973 scale = level_scale[rem6[qp]] << (div6[qp]);
974         scale_m = 16; // default when no custom scaling list is used
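        /* scale = level_scale[qp % 6] << (qp / 6); e.g. for 8-bit video, a
         * 4x4 TU (log2_trafo_size == 2) and qp == 30: shift = 8 + 2 - 5 = 5,
         * add = 16, scale = 40 << 5 = 1280. scale_m is replaced per position
         * from the scaling matrix below when scaling lists are enabled. */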
977 if (s->ps.sps->scaling_list_enable_flag) {
978 const ScalingList *sl = s->ps.pps->scaling_list_data_present_flag ?
979 &s->ps.pps->scaling_list : &s->ps.sps->scaling_list;
980 int matrix_id = lc->cu.pred_mode != MODE_INTRA;
982 if (log2_trafo_size != 5)
983 matrix_id = 3 * matrix_id + c_idx;
985 scale_matrix = sl->sl[log2_trafo_size - 2][matrix_id];
986 if (log2_trafo_size >= 4)
987 dc_scale = sl->sl_dc[log2_trafo_size - 4][matrix_id];
991 if (s->ps.pps->transform_skip_enabled_flag &&
992 !lc->cu.cu_transquant_bypass_flag &&
993 log2_trafo_size == 2) {
994 transform_skip_flag = ff_hevc_transform_skip_flag_decode(s, c_idx);
997 last_significant_coeff_x =
998 ff_hevc_last_significant_coeff_x_prefix_decode(s, c_idx, log2_trafo_size);
999 last_significant_coeff_y =
1000 ff_hevc_last_significant_coeff_y_prefix_decode(s, c_idx, log2_trafo_size);
1002 if (last_significant_coeff_x > 3) {
1003 int suffix = ff_hevc_last_significant_coeff_suffix_decode(s, last_significant_coeff_x);
1004 last_significant_coeff_x = (1 << ((last_significant_coeff_x >> 1) - 1)) *
1005 (2 + (last_significant_coeff_x & 1)) +
1009 if (last_significant_coeff_y > 3) {
1010 int suffix = ff_hevc_last_significant_coeff_suffix_decode(s, last_significant_coeff_y);
1011 last_significant_coeff_y = (1 << ((last_significant_coeff_y >> 1) - 1)) *
1012 (2 + (last_significant_coeff_y & 1)) +
1016 if (scan_idx == SCAN_VERT)
1017 FFSWAP(int, last_significant_coeff_x, last_significant_coeff_y);
1019 x_cg_last_sig = last_significant_coeff_x >> 2;
1020 y_cg_last_sig = last_significant_coeff_y >> 2;
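    /* For prefix values > 3 the last significant coefficient coordinate is
     * (1 << ((prefix >> 1) - 1)) * (2 + (prefix & 1)) + suffix, where the
     * suffix holds (prefix >> 1) - 1 extra bits; e.g. prefix 6 gives 8 plus a
     * 2-bit suffix, i.e. a coordinate in 8..11. */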
1024 int last_x_c = last_significant_coeff_x & 3;
1025 int last_y_c = last_significant_coeff_y & 3;
1027 scan_x_off = ff_hevc_diag_scan4x4_x;
1028 scan_y_off = ff_hevc_diag_scan4x4_y;
1029 num_coeff = diag_scan4x4_inv[last_y_c][last_x_c];
1030 if (trafo_size == 4) {
1031 scan_x_cg = scan_1x1;
1032 scan_y_cg = scan_1x1;
1033 } else if (trafo_size == 8) {
1034 num_coeff += diag_scan2x2_inv[y_cg_last_sig][x_cg_last_sig] << 4;
1035 scan_x_cg = diag_scan2x2_x;
1036 scan_y_cg = diag_scan2x2_y;
1037 } else if (trafo_size == 16) {
1038 num_coeff += diag_scan4x4_inv[y_cg_last_sig][x_cg_last_sig] << 4;
1039 scan_x_cg = ff_hevc_diag_scan4x4_x;
1040 scan_y_cg = ff_hevc_diag_scan4x4_y;
1041 } else { // trafo_size == 32
1042 num_coeff += diag_scan8x8_inv[y_cg_last_sig][x_cg_last_sig] << 4;
1043 scan_x_cg = ff_hevc_diag_scan8x8_x;
1044 scan_y_cg = ff_hevc_diag_scan8x8_y;
1049 scan_x_cg = horiz_scan2x2_x;
1050 scan_y_cg = horiz_scan2x2_y;
1051 scan_x_off = horiz_scan4x4_x;
1052 scan_y_off = horiz_scan4x4_y;
1053 num_coeff = horiz_scan8x8_inv[last_significant_coeff_y][last_significant_coeff_x];
1055 default: //SCAN_VERT
1056 scan_x_cg = horiz_scan2x2_y;
1057 scan_y_cg = horiz_scan2x2_x;
1058 scan_x_off = horiz_scan4x4_y;
1059 scan_y_off = horiz_scan4x4_x;
1060 num_coeff = horiz_scan8x8_inv[last_significant_coeff_x][last_significant_coeff_y];
1064 num_last_subset = (num_coeff - 1) >> 4;
1066 for (i = num_last_subset; i >= 0; i--) {
1068 int x_cg, y_cg, x_c, y_c;
1069 int implicit_non_zero_coeff = 0;
1070 int64_t trans_coeff_level;
1072 int offset = i << 4;
1074 uint8_t significant_coeff_flag_idx[16];
1075 uint8_t nb_significant_coeff_flag = 0;
1077 x_cg = scan_x_cg[i];
1078 y_cg = scan_y_cg[i];
1080 if (i < num_last_subset && i > 0) {
1082 if (x_cg < (1 << (log2_trafo_size - 2)) - 1)
1083 ctx_cg += significant_coeff_group_flag[x_cg + 1][y_cg];
1084 if (y_cg < (1 << (log2_trafo_size - 2)) - 1)
1085 ctx_cg += significant_coeff_group_flag[x_cg][y_cg + 1];
1087 significant_coeff_group_flag[x_cg][y_cg] =
1088 ff_hevc_significant_coeff_group_flag_decode(s, c_idx, ctx_cg);
1089 implicit_non_zero_coeff = 1;
1091 significant_coeff_group_flag[x_cg][y_cg] =
1092 ((x_cg == x_cg_last_sig && y_cg == y_cg_last_sig) ||
1093 (x_cg == 0 && y_cg == 0));
1096 last_scan_pos = num_coeff - offset - 1;
1098 if (i == num_last_subset) {
1099 n_end = last_scan_pos - 1;
1100 significant_coeff_flag_idx[0] = last_scan_pos;
1101 nb_significant_coeff_flag = 1;
1106 if (x_cg < ((1 << log2_trafo_size) - 1) >> 2)
1107 prev_sig = significant_coeff_group_flag[x_cg + 1][y_cg];
1108 if (y_cg < ((1 << log2_trafo_size) - 1) >> 2)
1109 prev_sig += significant_coeff_group_flag[x_cg][y_cg + 1] << 1;
1111 for (n = n_end; n >= 0; n--) {
1112 GET_COORD(offset, n);
1114 if (significant_coeff_group_flag[x_cg][y_cg] &&
1115 (n > 0 || implicit_non_zero_coeff == 0)) {
1116 if (ff_hevc_significant_coeff_flag_decode(s, c_idx, x_c, y_c,
1120 significant_coeff_flag_idx[nb_significant_coeff_flag] = n;
1121 nb_significant_coeff_flag++;
1122 implicit_non_zero_coeff = 0;
1125 int last_cg = (x_c == (x_cg << 2) && y_c == (y_cg << 2));
1126 if (last_cg && implicit_non_zero_coeff && significant_coeff_group_flag[x_cg][y_cg]) {
1127 significant_coeff_flag_idx[nb_significant_coeff_flag] = n;
1128 nb_significant_coeff_flag++;
1133 n_end = nb_significant_coeff_flag;
1136 int first_nz_pos_in_cg = 16;
1137 int last_nz_pos_in_cg = -1;
1138 int c_rice_param = 0;
1139 int first_greater1_coeff_idx = -1;
1140 uint8_t coeff_abs_level_greater1_flag[16] = { 0 };
1141 uint16_t coeff_sign_flag;
1143 int sign_hidden = 0;
1145        // initialize first element of coeff_abs_level_greater1_flag
1146 int ctx_set = (i > 0 && c_idx == 0) ? 2 : 0;
1148 if (!(i == num_last_subset) && greater1_ctx == 0)
1151 last_nz_pos_in_cg = significant_coeff_flag_idx[0];
1153 for (m = 0; m < (n_end > 8 ? 8 : n_end); m++) {
1154 int n_idx = significant_coeff_flag_idx[m];
1155 int inc = (ctx_set << 2) + greater1_ctx;
1156 coeff_abs_level_greater1_flag[n_idx] =
1157 ff_hevc_coeff_abs_level_greater1_flag_decode(s, c_idx, inc);
1158 if (coeff_abs_level_greater1_flag[n_idx]) {
1160 } else if (greater1_ctx > 0 && greater1_ctx < 3) {
1164 if (coeff_abs_level_greater1_flag[n_idx] &&
1165 first_greater1_coeff_idx == -1)
1166 first_greater1_coeff_idx = n_idx;
1168 first_nz_pos_in_cg = significant_coeff_flag_idx[n_end - 1];
1169 sign_hidden = last_nz_pos_in_cg - first_nz_pos_in_cg >= 4 &&
1170 !lc->cu.cu_transquant_bypass_flag;
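        /* Sign data hiding: when enabled and the distance between the first
         * and last nonzero coefficient of the 4x4 group is >= 4 (and
         * transquant bypass is off), the sign of the coefficient at
         * first_nz_pos_in_cg is not transmitted; it is inferred negative below
         * when the sum of absolute levels in the group is odd. */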
1172 if (first_greater1_coeff_idx != -1) {
1173 coeff_abs_level_greater1_flag[first_greater1_coeff_idx] += ff_hevc_coeff_abs_level_greater2_flag_decode(s, c_idx, ctx_set);
1175 if (!s->ps.pps->sign_data_hiding_flag || !sign_hidden) {
1176 coeff_sign_flag = ff_hevc_coeff_sign_flag(s, nb_significant_coeff_flag) << (16 - nb_significant_coeff_flag);
1178 coeff_sign_flag = ff_hevc_coeff_sign_flag(s, nb_significant_coeff_flag - 1) << (16 - (nb_significant_coeff_flag - 1));
1181 for (m = 0; m < n_end; m++) {
1182 n = significant_coeff_flag_idx[m];
1183 GET_COORD(offset, n);
1184 trans_coeff_level = 1 + coeff_abs_level_greater1_flag[n];
1185 if (trans_coeff_level == ((m < 8) ?
1186 ((n == first_greater1_coeff_idx) ? 3 : 2) : 1)) {
1187 trans_coeff_level += ff_hevc_coeff_abs_level_remaining(s, trans_coeff_level, c_rice_param);
1188 if ((trans_coeff_level) > (3 * (1 << c_rice_param)))
1189 c_rice_param = FFMIN(c_rice_param + 1, 4);
1191 if (s->ps.pps->sign_data_hiding_flag && sign_hidden) {
1192 sum_abs += trans_coeff_level;
1193 if (n == first_nz_pos_in_cg && ((sum_abs & 1) == 1))
1194 trans_coeff_level = -trans_coeff_level;
1196 if (coeff_sign_flag >> 15)
1197 trans_coeff_level = -trans_coeff_level;
1198 coeff_sign_flag <<= 1;
1199 if (!lc->cu.cu_transquant_bypass_flag) {
1200 if (s->ps.sps->scaling_list_enable_flag) {
1201 if (y_c || x_c || log2_trafo_size < 4) {
1203 switch (log2_trafo_size) {
1204 case 3: pos = (y_c << 3) + x_c; break;
1205 case 4: pos = ((y_c >> 1) << 3) + (x_c >> 1); break;
1206 case 5: pos = ((y_c >> 2) << 3) + (x_c >> 2); break;
1207 default: pos = (y_c << 2) + x_c;
1209 scale_m = scale_matrix[pos];
1214 trans_coeff_level = (trans_coeff_level * (int64_t)scale * (int64_t)scale_m + add) >> shift;
1215 if(trans_coeff_level < 0) {
1216 if((~trans_coeff_level) & 0xFffffffffff8000)
1217 trans_coeff_level = -32768;
1219 if (trans_coeff_level & 0xffffffffffff8000)
1220 trans_coeff_level = 32767;
1223 coeffs[y_c * trafo_size + x_c] = trans_coeff_level;
1228 if (!lc->cu.cu_transquant_bypass_flag) {
1229 if (transform_skip_flag)
1230 s->hevcdsp.dequant(coeffs);
1231 else if (lc->cu.pred_mode == MODE_INTRA && c_idx == 0 &&
1232 log2_trafo_size == 2)
1233 s->hevcdsp.transform_4x4_luma(coeffs);
1235 int max_xy = FFMAX(last_significant_coeff_x, last_significant_coeff_y);
1237 s->hevcdsp.idct_dc[log2_trafo_size - 2](coeffs);
1239 int col_limit = last_significant_coeff_x + last_significant_coeff_y + 4;
1241 col_limit = FFMIN(4, col_limit);
1242 else if (max_xy < 8)
1243 col_limit = FFMIN(8, col_limit);
1244 else if (max_xy < 12)
1245 col_limit = FFMIN(24, col_limit);
1246 s->hevcdsp.idct[log2_trafo_size - 2](coeffs, col_limit);
1250 s->hevcdsp.add_residual[log2_trafo_size - 2](dst, coeffs, stride);
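/**
 * Reconstruct one transform unit: perform intra prediction for the block (for
 * 4x4 luma, chroma is predicted and coded only with the last sub-block,
 * blk_idx == 3, at the parent position xBase/yBase), parse cu_qp_delta when
 * enabled, pick the coefficient scan for 4x4/8x8 intra TUs (vertical scan for
 * near-horizontal modes 6-14, horizontal scan for near-vertical modes 22-30,
 * diagonal otherwise) and decode the luma and chroma residuals.
 */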
1253 static int hls_transform_unit(HEVCContext *s, int x0, int y0,
1254 int xBase, int yBase, int cb_xBase, int cb_yBase,
1255 int log2_cb_size, int log2_trafo_size,
1256 int blk_idx, int cbf_luma, int cbf_cb, int cbf_cr)
1258 HEVCLocalContext *lc = &s->HEVClc;
1260 if (lc->cu.pred_mode == MODE_INTRA) {
1261 int trafo_size = 1 << log2_trafo_size;
1262 ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
1264 s->hpc.intra_pred[log2_trafo_size - 2](s, x0, y0, 0);
1265 if (log2_trafo_size > 2) {
1266 trafo_size = trafo_size << (s->ps.sps->hshift[1] - 1);
1267 ff_hevc_set_neighbour_available(s, x0, y0, trafo_size, trafo_size);
1268 s->hpc.intra_pred[log2_trafo_size - 3](s, x0, y0, 1);
1269 s->hpc.intra_pred[log2_trafo_size - 3](s, x0, y0, 2);
1270 } else if (blk_idx == 3) {
1271 trafo_size = trafo_size << s->ps.sps->hshift[1];
1272 ff_hevc_set_neighbour_available(s, xBase, yBase,
1273 trafo_size, trafo_size);
1274 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 1);
1275 s->hpc.intra_pred[log2_trafo_size - 2](s, xBase, yBase, 2);
1279 if (cbf_luma || cbf_cb || cbf_cr) {
1280 int scan_idx = SCAN_DIAG;
1281 int scan_idx_c = SCAN_DIAG;
1283 if (s->ps.pps->cu_qp_delta_enabled_flag && !lc->tu.is_cu_qp_delta_coded) {
1284 lc->tu.cu_qp_delta = ff_hevc_cu_qp_delta_abs(s);
1285 if (lc->tu.cu_qp_delta != 0)
1286 if (ff_hevc_cu_qp_delta_sign_flag(s) == 1)
1287 lc->tu.cu_qp_delta = -lc->tu.cu_qp_delta;
1288 lc->tu.is_cu_qp_delta_coded = 1;
1290 if (lc->tu.cu_qp_delta < -(26 + s->ps.sps->qp_bd_offset / 2) ||
1291 lc->tu.cu_qp_delta > (25 + s->ps.sps->qp_bd_offset / 2)) {
1292 av_log(s->avctx, AV_LOG_ERROR,
1293 "The cu_qp_delta %d is outside the valid range "
1296 -(26 + s->ps.sps->qp_bd_offset / 2),
1297 (25 + s->ps.sps->qp_bd_offset / 2));
1298 return AVERROR_INVALIDDATA;
1301 ff_hevc_set_qPy(s, x0, y0, cb_xBase, cb_yBase, log2_cb_size);
1304 if (lc->cu.pred_mode == MODE_INTRA && log2_trafo_size < 4) {
1305 if (lc->tu.cur_intra_pred_mode >= 6 &&
1306 lc->tu.cur_intra_pred_mode <= 14) {
1307 scan_idx = SCAN_VERT;
1308 } else if (lc->tu.cur_intra_pred_mode >= 22 &&
1309 lc->tu.cur_intra_pred_mode <= 30) {
1310 scan_idx = SCAN_HORIZ;
1313 if (lc->pu.intra_pred_mode_c >= 6 &&
1314 lc->pu.intra_pred_mode_c <= 14) {
1315 scan_idx_c = SCAN_VERT;
1316 } else if (lc->pu.intra_pred_mode_c >= 22 &&
1317 lc->pu.intra_pred_mode_c <= 30) {
1318 scan_idx_c = SCAN_HORIZ;
1323 hls_residual_coding(s, x0, y0, log2_trafo_size, scan_idx, 0);
1324 if (log2_trafo_size > 2) {
1326 hls_residual_coding(s, x0, y0, log2_trafo_size - 1, scan_idx_c, 1);
1328 hls_residual_coding(s, x0, y0, log2_trafo_size - 1, scan_idx_c, 2);
1329 } else if (blk_idx == 3) {
1331 hls_residual_coding(s, xBase, yBase, log2_trafo_size, scan_idx_c, 1);
1333 hls_residual_coding(s, xBase, yBase, log2_trafo_size, scan_idx_c, 2);
1339 static void set_deblocking_bypass(HEVCContext *s, int x0, int y0, int log2_cb_size)
1341 int cb_size = 1 << log2_cb_size;
1342 int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
1344 int min_pu_width = s->ps.sps->min_pu_width;
1345 int x_end = FFMIN(x0 + cb_size, s->ps.sps->width);
1346 int y_end = FFMIN(y0 + cb_size, s->ps.sps->height);
1349 for (j = (y0 >> log2_min_pu_size); j < (y_end >> log2_min_pu_size); j++)
1350 for (i = (x0 >> log2_min_pu_size); i < (x_end >> log2_min_pu_size); i++)
1351 s->is_pcm[i + j * min_pu_width] = 2;
1354 static int hls_transform_tree(HEVCContext *s, int x0, int y0,
1355 int xBase, int yBase, int cb_xBase, int cb_yBase,
1356 int log2_cb_size, int log2_trafo_size,
1357 int trafo_depth, int blk_idx,
1358 int cbf_cb, int cbf_cr)
1360 HEVCLocalContext *lc = &s->HEVClc;
1361 uint8_t split_transform_flag;
1364 if (lc->cu.intra_split_flag) {
1365 if (trafo_depth == 1)
1366 lc->tu.cur_intra_pred_mode = lc->pu.intra_pred_mode[blk_idx];
1368 lc->tu.cur_intra_pred_mode = lc->pu.intra_pred_mode[0];
1371 if (log2_trafo_size <= s->ps.sps->log2_max_trafo_size &&
1372 log2_trafo_size > s->ps.sps->log2_min_tb_size &&
1373 trafo_depth < lc->cu.max_trafo_depth &&
1374 !(lc->cu.intra_split_flag && trafo_depth == 0)) {
1375 split_transform_flag = ff_hevc_split_transform_flag_decode(s, log2_trafo_size);
1377 int inter_split = s->ps.sps->max_transform_hierarchy_depth_inter == 0 &&
1378 lc->cu.pred_mode == MODE_INTER &&
1379 lc->cu.part_mode != PART_2Nx2N &&
1382 split_transform_flag = log2_trafo_size > s->ps.sps->log2_max_trafo_size ||
1383 (lc->cu.intra_split_flag && trafo_depth == 0) ||
1387 if (log2_trafo_size > 2 && (trafo_depth == 0 || cbf_cb))
1388 cbf_cb = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1389 else if (log2_trafo_size > 2 || trafo_depth == 0)
1391 if (log2_trafo_size > 2 && (trafo_depth == 0 || cbf_cr))
1392 cbf_cr = ff_hevc_cbf_cb_cr_decode(s, trafo_depth);
1393 else if (log2_trafo_size > 2 || trafo_depth == 0)
1396 if (split_transform_flag) {
1397 const int trafo_size_split = 1 << (log2_trafo_size - 1);
1398 const int x1 = x0 + trafo_size_split;
1399 const int y1 = y0 + trafo_size_split;
1401 #define SUBDIVIDE(x, y, idx) \
1403 ret = hls_transform_tree(s, x, y, x0, y0, cb_xBase, cb_yBase, log2_cb_size, \
1404 log2_trafo_size - 1, trafo_depth + 1, idx, \
1410 SUBDIVIDE(x0, y0, 0);
1411 SUBDIVIDE(x1, y0, 1);
1412 SUBDIVIDE(x0, y1, 2);
1413 SUBDIVIDE(x1, y1, 3);
1417 int min_tu_size = 1 << s->ps.sps->log2_min_tb_size;
1418 int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
1419 int min_tu_width = s->ps.sps->min_tb_width;
1422 if (lc->cu.pred_mode == MODE_INTRA || trafo_depth != 0 ||
1424 cbf_luma = ff_hevc_cbf_luma_decode(s, trafo_depth);
1426 ret = hls_transform_unit(s, x0, y0, xBase, yBase, cb_xBase, cb_yBase,
1427 log2_cb_size, log2_trafo_size,
1428 blk_idx, cbf_luma, cbf_cb, cbf_cr);
1431 // TODO: store cbf_luma somewhere else
1434 for (i = 0; i < (1 << log2_trafo_size); i += min_tu_size)
1435 for (j = 0; j < (1 << log2_trafo_size); j += min_tu_size) {
1436 int x_tu = (x0 + j) >> log2_min_tu_size;
1437 int y_tu = (y0 + i) >> log2_min_tu_size;
1438 s->cbf_luma[y_tu * min_tu_width + x_tu] = 1;
1441 if (!s->sh.disable_deblocking_filter_flag) {
1442 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_trafo_size);
1443 if (s->ps.pps->transquant_bypass_enable_flag &&
1444 lc->cu.cu_transquant_bypass_flag)
1445 set_deblocking_bypass(s, x0, y0, log2_trafo_size);
1451 static int hls_pcm_sample(HEVCContext *s, int x0, int y0, int log2_cb_size)
1453 //TODO: non-4:2:0 support
1454 HEVCLocalContext *lc = &s->HEVClc;
1456 int cb_size = 1 << log2_cb_size;
1457 ptrdiff_t stride0 = s->frame->linesize[0];
1458 ptrdiff_t stride1 = s->frame->linesize[1];
1459 ptrdiff_t stride2 = s->frame->linesize[2];
1460 uint8_t *dst0 = &s->frame->data[0][y0 * stride0 + (x0 << s->ps.sps->pixel_shift)];
1461 uint8_t *dst1 = &s->frame->data[1][(y0 >> s->ps.sps->vshift[1]) * stride1 + ((x0 >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
1462 uint8_t *dst2 = &s->frame->data[2][(y0 >> s->ps.sps->vshift[2]) * stride2 + ((x0 >> s->ps.sps->hshift[2]) << s->ps.sps->pixel_shift)];
1464 int length = cb_size * cb_size * s->ps.sps->pcm.bit_depth + ((cb_size * cb_size) >> 1) * s->ps.sps->pcm.bit_depth_chroma;
1465 const uint8_t *pcm = skip_bytes(&lc->cc, (length + 7) >> 3);
1468 if (!s->sh.disable_deblocking_filter_flag)
1469 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
1471 ret = init_get_bits(&gb, pcm, length);
1475 s->hevcdsp.put_pcm(dst0, stride0, cb_size, &gb, s->ps.sps->pcm.bit_depth);
1476 s->hevcdsp.put_pcm(dst1, stride1, cb_size / 2, &gb, s->ps.sps->pcm.bit_depth_chroma);
1477 s->hevcdsp.put_pcm(dst2, stride2, cb_size / 2, &gb, s->ps.sps->pcm.bit_depth_chroma);
1481 static void hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size)
1483 HEVCLocalContext *lc = &s->HEVClc;
1484 int x = ff_hevc_abs_mvd_greater0_flag_decode(s);
1485 int y = ff_hevc_abs_mvd_greater0_flag_decode(s);
1488 x += ff_hevc_abs_mvd_greater1_flag_decode(s);
1490 y += ff_hevc_abs_mvd_greater1_flag_decode(s);
1493 case 2: lc->pu.mvd.x = ff_hevc_mvd_decode(s); break;
1494 case 1: lc->pu.mvd.x = ff_hevc_mvd_sign_flag_decode(s); break;
1495 case 0: lc->pu.mvd.x = 0; break;
1499 case 2: lc->pu.mvd.y = ff_hevc_mvd_decode(s); break;
1500 case 1: lc->pu.mvd.y = ff_hevc_mvd_sign_flag_decode(s); break;
1501 case 0: lc->pu.mvd.y = 0; break;
1506 * 8.5.3.2.2.1 Luma sample interpolation process
1508 * @param s HEVC decoding context
1509 * @param dst target buffer for block data at block position
1510 * @param dststride stride of the dst buffer
1511 * @param ref reference picture buffer at origin (0, 0)
1512 * @param mv motion vector (relative to block position) to get pixel data from
1513 * @param x_off horizontal position of block from origin (0, 0)
1514 * @param y_off vertical position of block from origin (0, 0)
1515 * @param block_w width of block
1516 * @param block_h height of block
1518 static void luma_mc(HEVCContext *s, int16_t *dst, ptrdiff_t dststride,
1519 AVFrame *ref, const Mv *mv, int x_off, int y_off,
1520 int block_w, int block_h, int pred_idx)
1522 HEVCLocalContext *lc = &s->HEVClc;
1523 uint8_t *src = ref->data[0];
1524 ptrdiff_t srcstride = ref->linesize[0];
1525 int pic_width = s->ps.sps->width;
1526 int pic_height = s->ps.sps->height;
1530 int extra_left = ff_hevc_qpel_extra_before[mx];
1531 int extra_top = ff_hevc_qpel_extra_before[my];
1533 x_off += mv->x >> 2;
1534 y_off += mv->y >> 2;
1535 src += y_off * srcstride + (x_off * (1 << s->ps.sps->pixel_shift));
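    /* Luma motion vectors are in quarter-sample units: mv >> 2 is the integer
     * offset added to the block position, mv & 3 (mx/my) is the fractional
     * phase selecting the qpel interpolation filter. */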
1537 if (x_off < extra_left || y_off < extra_top ||
1538 x_off >= pic_width - block_w - ff_hevc_qpel_extra_after[mx] ||
1539 y_off >= pic_height - block_h - ff_hevc_qpel_extra_after[my]) {
1540 const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1541 int offset = extra_top * srcstride + (extra_left << s->ps.sps->pixel_shift);
1542 int buf_offset = extra_top *
1543 edge_emu_stride + (extra_left << s->ps.sps->pixel_shift);
1545 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src - offset,
1546 edge_emu_stride, srcstride,
1547 block_w + ff_hevc_qpel_extra[mx],
1548 block_h + ff_hevc_qpel_extra[my],
1549 x_off - extra_left, y_off - extra_top,
1550 pic_width, pic_height);
1551 src = lc->edge_emu_buffer + buf_offset;
1552 srcstride = edge_emu_stride;
1554 s->hevcdsp.put_hevc_qpel[!!my][!!mx][pred_idx](dst, dststride, src, srcstride,
1555 block_h, mx, my, lc->mc_buffer);
1559 * 8.5.3.2.2.2 Chroma sample interpolation process
1561 * @param s HEVC decoding context
1562 * @param dst1 target buffer for block data at block position (U plane)
1563 * @param dst2 target buffer for block data at block position (V plane)
1564 * @param dststride stride of the dst1 and dst2 buffers
1565 * @param ref reference picture buffer at origin (0, 0)
1566 * @param mv motion vector (relative to block position) to get pixel data from
1567 * @param x_off horizontal position of block from origin (0, 0)
1568 * @param y_off vertical position of block from origin (0, 0)
1569 * @param block_w width of block
1570 * @param block_h height of block
1572 static void chroma_mc(HEVCContext *s, int16_t *dst1, int16_t *dst2,
1573 ptrdiff_t dststride, AVFrame *ref, const Mv *mv,
1574 int x_off, int y_off, int block_w, int block_h, int pred_idx)
1576 HEVCLocalContext *lc = &s->HEVClc;
1577 uint8_t *src1 = ref->data[1];
1578 uint8_t *src2 = ref->data[2];
1579 ptrdiff_t src1stride = ref->linesize[1];
1580 ptrdiff_t src2stride = ref->linesize[2];
1581 int pic_width = s->ps.sps->width >> 1;
1582 int pic_height = s->ps.sps->height >> 1;
1587 x_off += mv->x >> 3;
1588 y_off += mv->y >> 3;
1589 src1 += y_off * src1stride + (x_off * (1 << s->ps.sps->pixel_shift));
1590 src2 += y_off * src2stride + (x_off * (1 << s->ps.sps->pixel_shift));
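    /* Chroma motion vectors are in eighth-sample units for 4:2:0: mv >> 3 is
     * the integer offset in the subsampled plane, mv & 7 the fractional phase
     * for the 4-tap epel filters. */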
1592 if (x_off < EPEL_EXTRA_BEFORE || y_off < EPEL_EXTRA_AFTER ||
1593 x_off >= pic_width - block_w - EPEL_EXTRA_AFTER ||
1594 y_off >= pic_height - block_h - EPEL_EXTRA_AFTER) {
1595 const ptrdiff_t edge_emu_stride = EDGE_EMU_BUFFER_STRIDE << s->ps.sps->pixel_shift;
1596 int offset1 = EPEL_EXTRA_BEFORE * (src1stride + (1 << s->ps.sps->pixel_shift));
1597 int buf_offset1 = EPEL_EXTRA_BEFORE *
1598 (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1599 int offset2 = EPEL_EXTRA_BEFORE * (src2stride + (1 << s->ps.sps->pixel_shift));
1600 int buf_offset2 = EPEL_EXTRA_BEFORE *
1601 (edge_emu_stride + (1 << s->ps.sps->pixel_shift));
1603 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src1 - offset1,
1604 edge_emu_stride, src1stride,
1605 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1606 x_off - EPEL_EXTRA_BEFORE,
1607 y_off - EPEL_EXTRA_BEFORE,
1608 pic_width, pic_height);
1610 src1 = lc->edge_emu_buffer + buf_offset1;
1611 src1stride = edge_emu_stride;
1612 s->hevcdsp.put_hevc_epel[!!my][!!mx][pred_idx](dst1, dststride, src1, src1stride,
1613 block_h, mx, my, lc->mc_buffer);
1615 s->vdsp.emulated_edge_mc(lc->edge_emu_buffer, src2 - offset2,
1616 edge_emu_stride, src2stride,
1617 block_w + EPEL_EXTRA, block_h + EPEL_EXTRA,
1618 x_off - EPEL_EXTRA_BEFORE,
1619 y_off - EPEL_EXTRA_BEFORE,
1620 pic_width, pic_height);
1621 src2 = lc->edge_emu_buffer + buf_offset2;
1622 src2stride = edge_emu_stride;
1624 s->hevcdsp.put_hevc_epel[!!my][!!mx][pred_idx](dst2, dststride, src2, src2stride,
1625 block_h, mx, my, lc->mc_buffer);
1627 s->hevcdsp.put_hevc_epel[!!my][!!mx][pred_idx](dst1, dststride, src1, src1stride,
1628 block_h, mx, my, lc->mc_buffer);
1629 s->hevcdsp.put_hevc_epel[!!my][!!mx][pred_idx](dst2, dststride, src2, src2stride,
1630 block_h, mx, my, lc->mc_buffer);
1634 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
1635 const Mv *mv, int y0, int height)
1637 int y = (mv->y >> 2) + y0 + height + 9;
1638 ff_thread_await_progress(&ref->tf, y, 0);
1641 static void hevc_luma_mv_mpv_mode(HEVCContext *s, int x0, int y0, int nPbW,
1642 int nPbH, int log2_cb_size, int part_idx,
1643 int merge_idx, MvField *mv)
1645 HEVCLocalContext *lc = &s->HEVClc;
1646 enum InterPredIdc inter_pred_idc = PRED_L0;
1649 ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
1650 if (s->sh.slice_type == HEVC_SLICE_B)
1651 inter_pred_idc = ff_hevc_inter_pred_idc_decode(s, nPbW, nPbH);
1653 if (inter_pred_idc != PRED_L1) {
1654 if (s->sh.nb_refs[L0])
1655 mv->ref_idx[0]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L0]);
1657 mv->pred_flag[0] = 1;
1658 hls_mvd_coding(s, x0, y0, 0);
1659 mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1660 ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1661 part_idx, merge_idx, mv, mvp_flag, 0);
1662 mv->mv[0].x += lc->pu.mvd.x;
1663 mv->mv[0].y += lc->pu.mvd.y;
1666 if (inter_pred_idc != PRED_L0) {
1667 if (s->sh.nb_refs[L1])
1668 mv->ref_idx[1]= ff_hevc_ref_idx_lx_decode(s, s->sh.nb_refs[L1]);
1670 if (s->sh.mvd_l1_zero_flag == 1 && inter_pred_idc == PRED_BI) {
1671 AV_ZERO32(&lc->pu.mvd);
1673 hls_mvd_coding(s, x0, y0, 1);
1676 mv->pred_flag[1] = 1;
1677 mvp_flag = ff_hevc_mvp_lx_flag_decode(s);
1678 ff_hevc_luma_mv_mvp_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1679 part_idx, merge_idx, mv, mvp_flag, 1);
1680 mv->mv[1].x += lc->pu.mvd.x;
1681 mv->mv[1].y += lc->pu.mvd.y;
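/**
 * Decode one prediction unit: read merge or AMVP motion information (the
 * latter via the function above), store the resulting MvField over the PU area
 * in tab_mvf, then run uni- or bi-directional quarter-pel luma and eighth-pel
 * chroma motion compensation into intermediate 16-bit buffers, combined by
 * default averaging or explicit weighted prediction.
 */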
1685 static void hls_prediction_unit(HEVCContext *s, int x0, int y0,
1687 int log2_cb_size, int partIdx)
1689 static const int pred_indices[] = {
1690 [4] = 0, [8] = 1, [12] = 2, [16] = 3, [24] = 4, [32] = 5, [48] = 6, [64] = 7,
1692 const int pred_idx = pred_indices[nPbW];
1694 #define POS(c_idx, x, y) \
1695 &s->frame->data[c_idx][((y) >> s->ps.sps->vshift[c_idx]) * s->frame->linesize[c_idx] + \
1696 (((x) >> s->ps.sps->hshift[c_idx]) << s->ps.sps->pixel_shift)]
1697 HEVCLocalContext *lc = &s->HEVClc;
1699 struct MvField current_mv = {{{ 0 }}};
1701 int min_pu_width = s->ps.sps->min_pu_width;
1702 int weighted_pred = (s->sh.slice_type == HEVC_SLICE_P && s->ps.pps->weighted_pred_flag) ||
1703 (s->sh.slice_type == HEVC_SLICE_B && s->ps.pps->weighted_bipred_flag);
1705 MvField *tab_mvf = s->ref->tab_mvf;
1706 RefPicList *refPicList = s->ref->refPicList;
1707 HEVCFrame *ref0, *ref1;
1709 ptrdiff_t tmpstride = MAX_PB_SIZE * sizeof(int16_t);
1711 uint8_t *dst0 = POS(0, x0, y0);
1712 uint8_t *dst1 = POS(1, x0, y0);
1713 uint8_t *dst2 = POS(2, x0, y0);
1714 int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
1715 int min_cb_width = s->ps.sps->min_cb_width;
1716 int x_cb = x0 >> log2_min_cb_size;
1717 int y_cb = y0 >> log2_min_cb_size;
1721 int skip_flag = SAMPLE_CTB(s->skip_flag, x_cb, y_cb);
1724 lc->pu.merge_flag = ff_hevc_merge_flag_decode(s);
1726 if (skip_flag || lc->pu.merge_flag) {
1727 if (s->sh.max_num_merge_cand > 1)
1728 merge_idx = ff_hevc_merge_idx_decode(s);
1732 ff_hevc_luma_mv_merge_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1733                                    partIdx, merge_idx, &current_mv);
1735 hevc_luma_mv_mpv_mode(s, x0, y0, nPbW, nPbH, log2_cb_size,
1736                              partIdx, merge_idx, &current_mv);
1739 x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1740 y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1742 for (j = 0; j < nPbH >> s->ps.sps->log2_min_pu_size; j++)
1743 for (i = 0; i < nPbW >> s->ps.sps->log2_min_pu_size; i++)
1744 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i] = current_mv;
1746 if (current_mv.pred_flag[0]) {
1747 ref0 = refPicList[0].ref[current_mv.ref_idx[0]];
1750        hevc_await_progress(s, ref0, &current_mv.mv[0], y0, nPbH);
1752 if (current_mv.pred_flag[1]) {
1753 ref1 = refPicList[1].ref[current_mv.ref_idx[1]];
1756        hevc_await_progress(s, ref1, &current_mv.mv[1], y0, nPbH);
1759 if (current_mv.pred_flag[0] && !current_mv.pred_flag[1]) {
1760 LOCAL_ALIGNED_16(int16_t, tmp, [MAX_PB_SIZE * MAX_PB_SIZE]);
1761 LOCAL_ALIGNED_16(int16_t, tmp2, [MAX_PB_SIZE * MAX_PB_SIZE]);
1763 luma_mc(s, tmp, tmpstride, ref0->frame,
1764                &current_mv.mv[0], x0, y0, nPbW, nPbH, pred_idx);
1766 if (weighted_pred) {
1767 s->hevcdsp.weighted_pred[pred_idx](s->sh.luma_log2_weight_denom,
1768 s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1769 s->sh.luma_offset_l0[current_mv.ref_idx[0]],
1770 dst0, s->frame->linesize[0], tmp,
1773 s->hevcdsp.put_unweighted_pred[pred_idx](dst0, s->frame->linesize[0], tmp, tmpstride, nPbH);
1775 chroma_mc(s, tmp, tmp2, tmpstride, ref0->frame,
1776                  &current_mv.mv[0], x0 / 2, y0 / 2, nPbW / 2, nPbH / 2, pred_idx);
1778 if (weighted_pred) {
1779 s->hevcdsp.weighted_pred_chroma[pred_idx](s->sh.chroma_log2_weight_denom,
1780 s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0],
1781 s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0],
1782 dst1, s->frame->linesize[1], tmp, tmpstride,
1784 s->hevcdsp.weighted_pred_chroma[pred_idx](s->sh.chroma_log2_weight_denom,
1785 s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1],
1786 s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1],
1787 dst2, s->frame->linesize[2], tmp2, tmpstride,
1790 s->hevcdsp.put_unweighted_pred_chroma[pred_idx](dst1, s->frame->linesize[1], tmp, tmpstride, nPbH / 2);
1791 s->hevcdsp.put_unweighted_pred_chroma[pred_idx](dst2, s->frame->linesize[2], tmp2, tmpstride, nPbH / 2);
1793 } else if (!current_mv.pred_flag[0] && current_mv.pred_flag[1]) {
1794 LOCAL_ALIGNED_16(int16_t, tmp, [MAX_PB_SIZE * MAX_PB_SIZE]);
1795 LOCAL_ALIGNED_16(int16_t, tmp2, [MAX_PB_SIZE * MAX_PB_SIZE]);
1797 luma_mc(s, tmp, tmpstride, ref1->frame,
1798                &current_mv.mv[1], x0, y0, nPbW, nPbH, pred_idx);
1800 if (weighted_pred) {
1801 s->hevcdsp.weighted_pred[pred_idx](s->sh.luma_log2_weight_denom,
1802 s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1803 s->sh.luma_offset_l1[current_mv.ref_idx[1]],
1804 dst0, s->frame->linesize[0], tmp, tmpstride,
1807 s->hevcdsp.put_unweighted_pred[pred_idx](dst0, s->frame->linesize[0], tmp, tmpstride, nPbH);
1810 chroma_mc(s, tmp, tmp2, tmpstride, ref1->frame,
1811                  &current_mv.mv[1], x0 / 2, y0 / 2, nPbW / 2, nPbH / 2, pred_idx);
1813 if (weighted_pred) {
1814 s->hevcdsp.weighted_pred_chroma[pred_idx](s->sh.chroma_log2_weight_denom,
1815 s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0],
1816 s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0],
1817 dst1, s->frame->linesize[1], tmp, tmpstride, nPbH/2);
1818 s->hevcdsp.weighted_pred_chroma[pred_idx](s->sh.chroma_log2_weight_denom,
1819 s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1],
1820 s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1],
1821 dst2, s->frame->linesize[2], tmp2, tmpstride, nPbH/2);
1823 s->hevcdsp.put_unweighted_pred_chroma[pred_idx](dst1, s->frame->linesize[1], tmp, tmpstride, nPbH / 2);
1824 s->hevcdsp.put_unweighted_pred_chroma[pred_idx](dst2, s->frame->linesize[2], tmp2, tmpstride, nPbH / 2);
1826 } else if (current_mv.pred_flag[0] && current_mv.pred_flag[1]) {
1827 LOCAL_ALIGNED_16(int16_t, tmp, [MAX_PB_SIZE * MAX_PB_SIZE]);
1828 LOCAL_ALIGNED_16(int16_t, tmp2, [MAX_PB_SIZE * MAX_PB_SIZE]);
1829 LOCAL_ALIGNED_16(int16_t, tmp3, [MAX_PB_SIZE * MAX_PB_SIZE]);
1830 LOCAL_ALIGNED_16(int16_t, tmp4, [MAX_PB_SIZE * MAX_PB_SIZE]);
1832 luma_mc(s, tmp, tmpstride, ref0->frame,
1833 &current_mv.mv[0], x0, y0, nPbW, nPbH, pred_idx);
1834 luma_mc(s, tmp2, tmpstride, ref1->frame,
1835 &current_mv.mv[1], x0, y0, nPbW, nPbH, pred_idx);
1837 if (weighted_pred) {
1838 s->hevcdsp.weighted_pred_avg[pred_idx](s->sh.luma_log2_weight_denom,
1839 s->sh.luma_weight_l0[current_mv.ref_idx[0]],
1840 s->sh.luma_weight_l1[current_mv.ref_idx[1]],
1841 s->sh.luma_offset_l0[current_mv.ref_idx[0]],
1842 s->sh.luma_offset_l1[current_mv.ref_idx[1]],
1843 dst0, s->frame->linesize[0],
1844 tmp, tmp2, tmpstride, nPbH);
1846 s->hevcdsp.put_unweighted_pred_avg[pred_idx](dst0, s->frame->linesize[0],
1847 tmp, tmp2, tmpstride, nPbH);
1850 chroma_mc(s, tmp, tmp2, tmpstride, ref0->frame,
1851 &current_mv.mv[0], x0 / 2, y0 / 2, nPbW / 2, nPbH / 2, pred_idx);
1852 chroma_mc(s, tmp3, tmp4, tmpstride, ref1->frame,
1853 &current_mv.mv[1], x0 / 2, y0 / 2, nPbW / 2, nPbH / 2, pred_idx);
1855 if (weighted_pred) {
1856 s->hevcdsp.weighted_pred_avg_chroma[pred_idx](s->sh.chroma_log2_weight_denom,
1857 s->sh.chroma_weight_l0[current_mv.ref_idx[0]][0],
1858 s->sh.chroma_weight_l1[current_mv.ref_idx[1]][0],
1859 s->sh.chroma_offset_l0[current_mv.ref_idx[0]][0],
1860 s->sh.chroma_offset_l1[current_mv.ref_idx[1]][0],
1861 dst1, s->frame->linesize[1], tmp, tmp3,
1862 tmpstride, nPbH / 2);
1863 s->hevcdsp.weighted_pred_avg_chroma[pred_idx](s->sh.chroma_log2_weight_denom,
1864 s->sh.chroma_weight_l0[current_mv.ref_idx[0]][1],
1865 s->sh.chroma_weight_l1[current_mv.ref_idx[1]][1],
1866 s->sh.chroma_offset_l0[current_mv.ref_idx[0]][1],
1867 s->sh.chroma_offset_l1[current_mv.ref_idx[1]][1],
1868 dst2, s->frame->linesize[2], tmp2, tmp4,
1869 tmpstride, nPbH / 2);
1871 s->hevcdsp.put_unweighted_pred_avg_chroma[pred_idx](dst1, s->frame->linesize[1], tmp, tmp3, tmpstride, nPbH/2);
1872 s->hevcdsp.put_unweighted_pred_avg_chroma[pred_idx](dst2, s->frame->linesize[2], tmp2, tmp4, tmpstride, nPbH/2);
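/* Derive the luma intra prediction mode of a prediction unit: build the
 * three most-probable-mode candidates from the left and up neighbours
 * (falling back to INTRA_DC when a neighbour is unavailable), then either
 * select candidate[mpm_idx] or map rem_intra_luma_pred_mode onto the
 * remaining modes. The resulting mode is also stored in tab_ipm and the
 * corresponding tab_mvf entries are marked as intra. */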
1880 static int luma_intra_pred_mode(HEVCContext *s, int x0, int y0, int pu_size,
1881 int prev_intra_luma_pred_flag)
1883 HEVCLocalContext *lc = &s->HEVClc;
1884 int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
1885 int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
1886 int min_pu_width = s->ps.sps->min_pu_width;
1887 int size_in_pus = pu_size >> s->ps.sps->log2_min_pu_size;
1888 int x0b = x0 & ((1 << s->ps.sps->log2_ctb_size) - 1);
1889 int y0b = y0 & ((1 << s->ps.sps->log2_ctb_size) - 1);
1891 int cand_up = (lc->ctb_up_flag || y0b) ?
1892 s->tab_ipm[(y_pu - 1) * min_pu_width + x_pu] : INTRA_DC;
1893 int cand_left = (lc->ctb_left_flag || x0b) ?
1894 s->tab_ipm[y_pu * min_pu_width + x_pu - 1] : INTRA_DC;
1896 int y_ctb = (y0 >> (s->ps.sps->log2_ctb_size)) << (s->ps.sps->log2_ctb_size);
1898 MvField *tab_mvf = s->ref->tab_mvf;
1899 int intra_pred_mode;
1903 // intra_pred_mode prediction does not cross vertical CTB boundaries
1904 if ((y0 - 1) < y_ctb)
1905 cand_up = INTRA_DC;
1907 if (cand_left == cand_up) {
1908 if (cand_left < 2) {
1909 candidate[0] = INTRA_PLANAR;
1910 candidate[1] = INTRA_DC;
1911 candidate[2] = INTRA_ANGULAR_26;
1913 candidate[0] = cand_left;
1914 candidate[1] = 2 + ((cand_left - 2 - 1 + 32) & 31);
1915 candidate[2] = 2 + ((cand_left - 2 + 1) & 31);
1918 candidate[0] = cand_left;
1919 candidate[1] = cand_up;
1920 if (candidate[0] != INTRA_PLANAR && candidate[1] != INTRA_PLANAR) {
1921 candidate[2] = INTRA_PLANAR;
1922 } else if (candidate[0] != INTRA_DC && candidate[1] != INTRA_DC) {
1923 candidate[2] = INTRA_DC;
1925 candidate[2] = INTRA_ANGULAR_26;
1929 if (prev_intra_luma_pred_flag) {
1930 intra_pred_mode = candidate[lc->pu.mpm_idx];
1932 if (candidate[0] > candidate[1])
1933 FFSWAP(uint8_t, candidate[0], candidate[1]);
1934 if (candidate[0] > candidate[2])
1935 FFSWAP(uint8_t, candidate[0], candidate[2]);
1936 if (candidate[1] > candidate[2])
1937 FFSWAP(uint8_t, candidate[1], candidate[2]);
1939 intra_pred_mode = lc->pu.rem_intra_luma_pred_mode;
1940 for (i = 0; i < 3; i++)
1941 if (intra_pred_mode >= candidate[i])
1942 intra_pred_mode++;
1945 /* write the intra prediction units into the mv array */
1948 for (i = 0; i < size_in_pus; i++) {
1949 memset(&s->tab_ipm[(y_pu + i) * min_pu_width + x_pu],
1950 intra_pred_mode, size_in_pus);
1952 for (j = 0; j < size_in_pus; j++) {
1953 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].is_intra = 1;
1954 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag[0] = 0;
1955 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].pred_flag[1] = 0;
1956 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].ref_idx[0] = 0;
1957 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].ref_idx[1] = 0;
1958 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].mv[0].x = 0;
1959 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].mv[0].y = 0;
1960 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].mv[1].x = 0;
1961 tab_mvf[(y_pu + j) * min_pu_width + x_pu + i].mv[1].y = 0;
1965 return intra_pred_mode;
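/* Store the coding tree depth of a coding block in tab_ct_depth; the
 * depths of the left and up neighbours are later used to pick the CABAC
 * context when decoding split_cu_flag. */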
1968 static av_always_inline void set_ct_depth(HEVCContext *s, int x0, int y0,
1969 int log2_cb_size, int ct_depth)
1971 int length = (1 << log2_cb_size) >> s->ps.sps->log2_min_cb_size;
1972 int x_cb = x0 >> s->ps.sps->log2_min_cb_size;
1973 int y_cb = y0 >> s->ps.sps->log2_min_cb_size;
1976 for (y = 0; y < length; y++)
1977 memset(&s->tab_ct_depth[(y_cb + y) * s->ps.sps->min_cb_width + x_cb],
1978 ct_depth, length);
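/* Parse the intra prediction syntax of a CU: one prev_intra_luma_pred_flag
 * per luma prediction block (one block, or four for PART_NxN), followed by
 * mpm_idx or rem_intra_luma_pred_mode for each block and finally the
 * chroma prediction mode. */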
1981 static void intra_prediction_unit(HEVCContext *s, int x0, int y0,
1984 HEVCLocalContext *lc = &s->HEVClc;
1985 static const uint8_t intra_chroma_table[4] = { 0, 26, 10, 1 };
1986 uint8_t prev_intra_luma_pred_flag[4];
1987 int split = lc->cu.part_mode == PART_NxN;
1988 int pb_size = (1 << log2_cb_size) >> split;
1989 int side = split + 1;
1993 for (i = 0; i < side; i++)
1994 for (j = 0; j < side; j++)
1995 prev_intra_luma_pred_flag[2 * i + j] = ff_hevc_prev_intra_luma_pred_flag_decode(s);
1997 for (i = 0; i < side; i++) {
1998 for (j = 0; j < side; j++) {
1999 if (prev_intra_luma_pred_flag[2 * i + j])
2000 lc->pu.mpm_idx = ff_hevc_mpm_idx_decode(s);
2002 lc->pu.rem_intra_luma_pred_mode = ff_hevc_rem_intra_luma_pred_mode_decode(s);
2004 lc->pu.intra_pred_mode[2 * i + j] =
2005 luma_intra_pred_mode(s, x0 + pb_size * j, y0 + pb_size * i, pb_size,
2006 prev_intra_luma_pred_flag[2 * i + j]);
2010 chroma_mode = ff_hevc_intra_chroma_pred_mode_decode(s);
2011 if (chroma_mode != 4) {
2012 if (lc->pu.intra_pred_mode[0] == intra_chroma_table[chroma_mode])
2013 lc->pu.intra_pred_mode_c = 34;
2015 lc->pu.intra_pred_mode_c = intra_chroma_table[chroma_mode];
2017 lc->pu.intra_pred_mode_c = lc->pu.intra_pred_mode[0];
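/* Fill in default intra prediction data for CUs that carry no intra
 * syntax (inter, skip and PCM CUs): tab_ipm is set to INTRA_DC and the
 * is_intra flag of the covered motion vector field entries follows the
 * CU prediction mode. */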
2021 static void intra_prediction_unit_default_value(HEVCContext *s,
2025 HEVCLocalContext *lc = &s->HEVClc;
2026 int pb_size = 1 << log2_cb_size;
2027 int size_in_pus = pb_size >> s->ps.sps->log2_min_pu_size;
2028 int min_pu_width = s->ps.sps->min_pu_width;
2029 MvField *tab_mvf = s->ref->tab_mvf;
2030 int x_pu = x0 >> s->ps.sps->log2_min_pu_size;
2031 int y_pu = y0 >> s->ps.sps->log2_min_pu_size;
2034 if (size_in_pus == 0)
2035 size_in_pus = 1;
2036 for (j = 0; j < size_in_pus; j++) {
2037 memset(&s->tab_ipm[(y_pu + j) * min_pu_width + x_pu], INTRA_DC, size_in_pus);
2038 for (k = 0; k < size_in_pus; k++)
2039 tab_mvf[(y_pu + j) * min_pu_width + x_pu + k].is_intra = lc->cu.pred_mode == MODE_INTRA;
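/* Decode one coding unit (coding_unit() in the specification): skip flag,
 * prediction and partitioning mode, optional PCM samples, the prediction
 * units and, unless the residual is absent, the transform tree. */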
2043 static int hls_coding_unit(HEVCContext *s, int x0, int y0, int log2_cb_size)
2045 int cb_size = 1 << log2_cb_size;
2046 HEVCLocalContext *lc = &s->HEVClc;
2047 int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
2048 int length = cb_size >> log2_min_cb_size;
2049 int min_cb_width = s->ps.sps->min_cb_width;
2050 int x_cb = x0 >> log2_min_cb_size;
2051 int y_cb = y0 >> log2_min_cb_size;
2056 lc->cu.pred_mode = MODE_INTRA;
2057 lc->cu.part_mode = PART_2Nx2N;
2058 lc->cu.intra_split_flag = 0;
2060 SAMPLE_CTB(s->skip_flag, x_cb, y_cb) = 0;
2061 for (x = 0; x < 4; x++)
2062 lc->pu.intra_pred_mode[x] = 1;
2063 if (s->ps.pps->transquant_bypass_enable_flag) {
2064 lc->cu.cu_transquant_bypass_flag = ff_hevc_cu_transquant_bypass_flag_decode(s);
2065 if (lc->cu.cu_transquant_bypass_flag)
2066 set_deblocking_bypass(s, x0, y0, log2_cb_size);
2068 lc->cu.cu_transquant_bypass_flag = 0;
2070 if (s->sh.slice_type != HEVC_SLICE_I) {
2071 uint8_t skip_flag = ff_hevc_skip_flag_decode(s, x0, y0, x_cb, y_cb);
2073 x = y_cb * min_cb_width + x_cb;
2074 for (y = 0; y < length; y++) {
2075 memset(&s->skip_flag[x], skip_flag, length);
2078 lc->cu.pred_mode = skip_flag ? MODE_SKIP : MODE_INTER;
2081 if (SAMPLE_CTB(s->skip_flag, x_cb, y_cb)) {
2082 hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0);
2083 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2085 if (!s->sh.disable_deblocking_filter_flag)
2086 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2090 if (s->sh.slice_type != HEVC_SLICE_I)
2091 lc->cu.pred_mode = ff_hevc_pred_mode_decode(s);
2092 if (lc->cu.pred_mode != MODE_INTRA ||
2093 log2_cb_size == s->ps.sps->log2_min_cb_size) {
2094 lc->cu.part_mode = ff_hevc_part_mode_decode(s, log2_cb_size);
2095 lc->cu.intra_split_flag = lc->cu.part_mode == PART_NxN &&
2096 lc->cu.pred_mode == MODE_INTRA;
2099 if (lc->cu.pred_mode == MODE_INTRA) {
2100 if (lc->cu.part_mode == PART_2Nx2N && s->ps.sps->pcm_enabled_flag &&
2101 log2_cb_size >= s->ps.sps->pcm.log2_min_pcm_cb_size &&
2102 log2_cb_size <= s->ps.sps->pcm.log2_max_pcm_cb_size) {
2103 pcm_flag = ff_hevc_pcm_flag_decode(s);
2106 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2107 ret = hls_pcm_sample(s, x0, y0, log2_cb_size);
2108 if (s->ps.sps->pcm.loop_filter_disable_flag)
2109 set_deblocking_bypass(s, x0, y0, log2_cb_size);
2114 intra_prediction_unit(s, x0, y0, log2_cb_size);
2117 intra_prediction_unit_default_value(s, x0, y0, log2_cb_size);
2118 switch (lc->cu.part_mode) {
2120 hls_prediction_unit(s, x0, y0, cb_size, cb_size, log2_cb_size, 0);
2123 hls_prediction_unit(s, x0, y0, cb_size, cb_size / 2, log2_cb_size, 0);
2124 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size, cb_size / 2, log2_cb_size, 1);
2127 hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size, log2_cb_size, 0);
2128 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size, log2_cb_size, 1);
2131 hls_prediction_unit(s, x0, y0, cb_size, cb_size / 4, log2_cb_size, 0);
2132 hls_prediction_unit(s, x0, y0 + cb_size / 4, cb_size, cb_size * 3 / 4, log2_cb_size, 1);
2135 hls_prediction_unit(s, x0, y0, cb_size, cb_size * 3 / 4, log2_cb_size, 0);
2136 hls_prediction_unit(s, x0, y0 + cb_size * 3 / 4, cb_size, cb_size / 4, log2_cb_size, 1);
2139 hls_prediction_unit(s, x0, y0, cb_size / 4, cb_size, log2_cb_size, 0);
2140 hls_prediction_unit(s, x0 + cb_size / 4, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 1);
2143 hls_prediction_unit(s, x0, y0, cb_size * 3 / 4, cb_size, log2_cb_size, 0);
2144 hls_prediction_unit(s, x0 + cb_size * 3 / 4, y0, cb_size / 4, cb_size, log2_cb_size, 1);
2147 hls_prediction_unit(s, x0, y0, cb_size / 2, cb_size / 2, log2_cb_size, 0);
2148 hls_prediction_unit(s, x0 + cb_size / 2, y0, cb_size / 2, cb_size / 2, log2_cb_size, 1);
2149 hls_prediction_unit(s, x0, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 2);
2150 hls_prediction_unit(s, x0 + cb_size / 2, y0 + cb_size / 2, cb_size / 2, cb_size / 2, log2_cb_size, 3);
2156 int rqt_root_cbf = 1;
2158 if (lc->cu.pred_mode != MODE_INTRA &&
2159 !(lc->cu.part_mode == PART_2Nx2N && lc->pu.merge_flag)) {
2160 rqt_root_cbf = ff_hevc_no_residual_syntax_flag_decode(s);
2163 lc->cu.max_trafo_depth = lc->cu.pred_mode == MODE_INTRA ?
2164 s->ps.sps->max_transform_hierarchy_depth_intra + lc->cu.intra_split_flag :
2165 s->ps.sps->max_transform_hierarchy_depth_inter;
2166 ret = hls_transform_tree(s, x0, y0, x0, y0, x0, y0,
2168 log2_cb_size, 0, 0, 0, 0);
2172 if (!s->sh.disable_deblocking_filter_flag)
2173 ff_hevc_deblocking_boundary_strengths(s, x0, y0, log2_cb_size);
2178 if (s->ps.pps->cu_qp_delta_enabled_flag && lc->tu.is_cu_qp_delta_coded == 0)
2179 ff_hevc_set_qPy(s, x0, y0, x0, y0, log2_cb_size);
2181 x = y_cb * min_cb_width + x_cb;
2182 for (y = 0; y < length; y++) {
2183 memset(&s->qp_y_tab[x], lc->qp_y, length);
2187 set_ct_depth(s, x0, y0, log2_cb_size, lc->ct.depth);
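/* Recursively parse the coding quadtree (coding_quadtree() in the
 * specification). A coding block is split either because split_cu_flag
 * was decoded as 1 or because it does not fit entirely inside the
 * picture; leaf blocks are decoded with hls_coding_unit(). */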
2192 static int hls_coding_quadtree(HEVCContext *s, int x0, int y0,
2193 int log2_cb_size, int cb_depth)
2195 HEVCLocalContext *lc = &s->HEVClc;
2196 const int cb_size = 1 << log2_cb_size;
2199 lc->ct.depth = cb_depth;
2200 if (x0 + cb_size <= s->ps.sps->width &&
2201 y0 + cb_size <= s->ps.sps->height &&
2202 log2_cb_size > s->ps.sps->log2_min_cb_size) {
2203 split_cu = ff_hevc_split_coding_unit_flag_decode(s, cb_depth, x0, y0);
2205 split_cu = (log2_cb_size > s->ps.sps->log2_min_cb_size);
2207 if (s->ps.pps->cu_qp_delta_enabled_flag &&
2208 log2_cb_size >= s->ps.sps->log2_ctb_size - s->ps.pps->diff_cu_qp_delta_depth) {
2209 lc->tu.is_cu_qp_delta_coded = 0;
2210 lc->tu.cu_qp_delta = 0;
2214 const int cb_size_split = cb_size >> 1;
2215 const int x1 = x0 + cb_size_split;
2216 const int y1 = y0 + cb_size_split;
2221 #define SUBDIVIDE(x, y) \
2223 if (x < s->ps.sps->width && y < s->ps.sps->height) { \
2224 int ret = hls_coding_quadtree(s, x, y, log2_cb_size, cb_depth);\
2235 int ret = hls_coding_unit(s, x0, y0, log2_cb_size);
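/* Initialize the per-CTB neighbour information: end-of-tile coordinates,
 * slice/tile boundary flags and the left/up/up-left/up-right availability
 * flags used for CABAC context derivation and intra prediction. */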
2243 static void hls_decode_neighbour(HEVCContext *s, int x_ctb, int y_ctb,
2246 HEVCLocalContext *lc = &s->HEVClc;
2247 int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2248 int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2249 int ctb_addr_in_slice = ctb_addr_rs - s->sh.slice_addr;
2251 s->tab_slice_address[ctb_addr_rs] = s->sh.slice_addr;
2253 if (s->ps.pps->entropy_coding_sync_enabled_flag) {
2254 if (x_ctb == 0 && (y_ctb & (ctb_size - 1)) == 0)
2255 lc->first_qp_group = 1;
2256 lc->end_of_tiles_x = s->ps.sps->width;
2257 } else if (s->ps.pps->tiles_enabled_flag) {
2258 if (ctb_addr_ts && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[ctb_addr_ts - 1]) {
2259 int idxX = s->ps.pps->col_idxX[x_ctb >> s->ps.sps->log2_ctb_size];
2260 lc->start_of_tiles_x = x_ctb;
2261 lc->end_of_tiles_x = x_ctb + (s->ps.pps->column_width[idxX] << s->ps.sps->log2_ctb_size);
2262 lc->first_qp_group = 1;
2265 lc->end_of_tiles_x = s->ps.sps->width;
2268 lc->end_of_tiles_y = FFMIN(y_ctb + ctb_size, s->ps.sps->height);
2270 lc->boundary_flags = 0;
2271 if (s->ps.pps->tiles_enabled_flag) {
2272 if (x_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - 1]])
2273 lc->boundary_flags |= BOUNDARY_LEFT_TILE;
2274 if (x_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - 1])
2275 lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2276 if (y_ctb > 0 && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]])
2277 lc->boundary_flags |= BOUNDARY_UPPER_TILE;
2278 if (y_ctb > 0 && s->tab_slice_address[ctb_addr_rs] != s->tab_slice_address[ctb_addr_rs - s->ps.sps->ctb_width])
2279 lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2281 if (!ctb_addr_in_slice)
2282 lc->boundary_flags |= BOUNDARY_LEFT_SLICE;
2283 if (ctb_addr_in_slice < s->ps.sps->ctb_width)
2284 lc->boundary_flags |= BOUNDARY_UPPER_SLICE;
2287 lc->ctb_left_flag = ((x_ctb > 0) && (ctb_addr_in_slice > 0) && !(lc->boundary_flags & BOUNDARY_LEFT_TILE));
2288 lc->ctb_up_flag = ((y_ctb > 0) && (ctb_addr_in_slice >= s->ps.sps->ctb_width) && !(lc->boundary_flags & BOUNDARY_UPPER_TILE));
2289 lc->ctb_up_right_flag = ((y_ctb > 0) && (ctb_addr_in_slice+1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1 - s->ps.sps->ctb_width]]));
2290 lc->ctb_up_left_flag = ((x_ctb > 0) && (y_ctb > 0) && (ctb_addr_in_slice-1 >= s->ps.sps->ctb_width) && (s->ps.pps->tile_id[ctb_addr_ts] == s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1 - s->ps.sps->ctb_width]]));
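/* Decode the CTBs of a slice in tile-scan order: initialize CABAC for
 * each CTB, parse the SAO parameters and the coding quadtree, then run
 * the in-loop filters on the completed CTBs. */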
2293 static int hls_slice_data(HEVCContext *s)
2295 int ctb_size = 1 << s->ps.sps->log2_ctb_size;
2299 int ctb_addr_ts = s->ps.pps->ctb_addr_rs_to_ts[s->sh.slice_ctb_addr_rs];
2302 while (more_data && ctb_addr_ts < s->ps.sps->ctb_size) {
2303 int ctb_addr_rs = s->ps.pps->ctb_addr_ts_to_rs[ctb_addr_ts];
2305 x_ctb = (ctb_addr_rs % ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2306 y_ctb = (ctb_addr_rs / ((s->ps.sps->width + ctb_size - 1) >> s->ps.sps->log2_ctb_size)) << s->ps.sps->log2_ctb_size;
2307 hls_decode_neighbour(s, x_ctb, y_ctb, ctb_addr_ts);
2309 ff_hevc_cabac_init(s, ctb_addr_ts);
2311 hls_sao_param(s, x_ctb >> s->ps.sps->log2_ctb_size, y_ctb >> s->ps.sps->log2_ctb_size);
2313 s->deblock[ctb_addr_rs].beta_offset = s->sh.beta_offset;
2314 s->deblock[ctb_addr_rs].tc_offset = s->sh.tc_offset;
2315 s->filter_slice_edges[ctb_addr_rs] = s->sh.slice_loop_filter_across_slices_enabled_flag;
2317 ret = hls_coding_quadtree(s, x_ctb, y_ctb, s->ps.sps->log2_ctb_size, 0);
2320 more_data = !ff_hevc_end_of_slice_flag_decode(s);
2323 ff_hevc_save_states(s, ctb_addr_ts);
2324 ff_hevc_hls_filters(s, x_ctb, y_ctb, ctb_size);
2327 if (x_ctb + ctb_size >= s->ps.sps->width &&
2328 y_ctb + ctb_size >= s->ps.sps->height)
2329 ff_hevc_hls_filter(s, x_ctb, y_ctb);
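/* For prediction units flagged in is_pcm (PCM or transquant-bypass coded
 * blocks with the loop filter disabled), copy the samples from s->frame
 * back into s->sao_frame so the losslessly coded regions are not altered
 * by SAO. */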
2334 static void restore_tqb_pixels(HEVCContext *s)
2336 int min_pu_size = 1 << s->ps.sps->log2_min_pu_size;
2339 for (c_idx = 0; c_idx < 3; c_idx++) {
2340 ptrdiff_t stride = s->frame->linesize[c_idx];
2341 int hshift = s->ps.sps->hshift[c_idx];
2342 int vshift = s->ps.sps->vshift[c_idx];
2343 for (y = 0; y < s->ps.sps->min_pu_height; y++) {
2344 for (x = 0; x < s->ps.sps->min_pu_width; x++) {
2345 if (s->is_pcm[y * s->ps.sps->min_pu_width + x]) {
2347 int len = min_pu_size >> hshift;
2348 uint8_t *src = &s->frame->data[c_idx][((y << s->ps.sps->log2_min_pu_size) >> vshift) * stride + (((x << s->ps.sps->log2_min_pu_size) >> hshift) << s->ps.sps->pixel_shift)];
2349 uint8_t *dst = &s->sao_frame->data[c_idx][((y << s->ps.sps->log2_min_pu_size) >> vshift) * stride + (((x << s->ps.sps->log2_min_pu_size) >> hshift) << s->ps.sps->pixel_shift)];
2350 for (n = 0; n < (min_pu_size >> vshift); n++) {
2351 memcpy(dst, src, len);
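/* Export SEI-derived metadata on the output frame: a frame packing
 * arrangement becomes AVStereo3D side data and a display orientation
 * message becomes a 3x3 display matrix. */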
2361 static int set_side_data(HEVCContext *s)
2363 AVFrame *out = s->ref->frame;
2365 if (s->sei_frame_packing_present &&
2366 s->frame_packing_arrangement_type >= 3 &&
2367 s->frame_packing_arrangement_type <= 5 &&
2368 s->content_interpretation_type > 0 &&
2369 s->content_interpretation_type < 3) {
2370 AVStereo3D *stereo = av_stereo3d_create_side_data(out);
2372 return AVERROR(ENOMEM);
2374 switch (s->frame_packing_arrangement_type) {
2376 if (s->quincunx_subsampling)
2377 stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
2379 stereo->type = AV_STEREO3D_SIDEBYSIDE;
2382 stereo->type = AV_STEREO3D_TOPBOTTOM;
2385 stereo->type = AV_STEREO3D_FRAMESEQUENCE;
2389 if (s->content_interpretation_type == 2)
2390 stereo->flags = AV_STEREO3D_FLAG_INVERT;
2393 if (s->sei_display_orientation_present &&
2394 (s->sei_anticlockwise_rotation || s->sei_hflip || s->sei_vflip)) {
2395 double angle = s->sei_anticlockwise_rotation * 360 / (double) (1 << 16);
2396 AVFrameSideData *rotation = av_frame_new_side_data(out,
2397 AV_FRAME_DATA_DISPLAYMATRIX,
2398 sizeof(int32_t) * 9);
2400 return AVERROR(ENOMEM);
2402 av_display_rotation_set((int32_t *)rotation->data, angle);
2403 av_display_matrix_flip((int32_t *)rotation->data,
2404 s->sei_hflip, s->sei_vflip);
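/* Per-frame initialisation: clear the boundary-strength, CBF and PCM maps,
 * acquire a new frame for the current POC, build the frame RPS, export the
 * side data and output any frame that became ready. */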
2410 static int hevc_frame_start(HEVCContext *s)
2412 HEVCLocalContext *lc = &s->HEVClc;
2415 memset(s->horizontal_bs, 0, 2 * s->bs_width * (s->bs_height + 1));
2416 memset(s->vertical_bs, 0, 2 * s->bs_width * (s->bs_height + 1));
2417 memset(s->cbf_luma, 0, s->ps.sps->min_tb_width * s->ps.sps->min_tb_height);
2418 memset(s->is_pcm, 0, s->ps.sps->min_pu_width * s->ps.sps->min_pu_height);
2420 lc->start_of_tiles_x = 0;
2422 s->first_nal_type = s->nal_unit_type;
2424 if (s->ps.pps->tiles_enabled_flag)
2425 lc->end_of_tiles_x = s->ps.pps->column_width[0] << s->ps.sps->log2_ctb_size;
2427 ret = ff_hevc_set_new_ref(s, s->ps.sps->sao_enabled ? &s->sao_frame : &s->frame,
2432 ret = ff_hevc_frame_rps(s);
2434 av_log(s->avctx, AV_LOG_ERROR, "Error constructing the frame RPS.\n");
2438 s->ref->frame->key_frame = IS_IRAP(s);
2440 ret = set_side_data(s);
2444 av_frame_unref(s->output_frame);
2445 ret = ff_hevc_output_frame(s, s->output_frame, 0);
2449 ff_thread_finish_setup(s->avctx);
2455 ff_hevc_unref_frame(s, s->ref, ~0);
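/* Decode a single NAL unit: parameter sets and SEI messages are handed to
 * their parsers, while VCL NAL units get their slice header decoded, start
 * a new frame when needed and then have their slice data decoded either
 * natively or through a hardware accelerator. */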
2460 static int decode_nal_unit(HEVCContext *s, const H2645NAL *nal)
2462 HEVCLocalContext *lc = &s->HEVClc;
2463 GetBitContext *gb = &lc->gb;
2464 int ctb_addr_ts, ret;
2467 s->nal_unit_type = nal->type;
2468 s->temporal_id = nal->temporal_id;
2470 switch (s->nal_unit_type) {
2472 ret = ff_hevc_decode_nal_vps(gb, s->avctx, &s->ps);
2477 ret = ff_hevc_decode_nal_sps(gb, s->avctx, &s->ps,
2478 s->apply_defdispwin);
2483 ret = ff_hevc_decode_nal_pps(gb, s->avctx, &s->ps);
2487 case HEVC_NAL_SEI_PREFIX:
2488 case HEVC_NAL_SEI_SUFFIX:
2489 ret = ff_hevc_decode_nal_sei(s);
2493 case HEVC_NAL_TRAIL_R:
2494 case HEVC_NAL_TRAIL_N:
2495 case HEVC_NAL_TSA_N:
2496 case HEVC_NAL_TSA_R:
2497 case HEVC_NAL_STSA_N:
2498 case HEVC_NAL_STSA_R:
2499 case HEVC_NAL_BLA_W_LP:
2500 case HEVC_NAL_BLA_W_RADL:
2501 case HEVC_NAL_BLA_N_LP:
2502 case HEVC_NAL_IDR_W_RADL:
2503 case HEVC_NAL_IDR_N_LP:
2504 case HEVC_NAL_CRA_NUT:
2505 case HEVC_NAL_RADL_N:
2506 case HEVC_NAL_RADL_R:
2507 case HEVC_NAL_RASL_N:
2508 case HEVC_NAL_RASL_R:
2509 ret = hls_slice_header(s);
2513 if (s->max_ra == INT_MAX) {
2514 if (s->nal_unit_type == HEVC_NAL_CRA_NUT || IS_BLA(s)) {
2518 s->max_ra = INT_MIN;
2522 if ((s->nal_unit_type == HEVC_NAL_RASL_R || s->nal_unit_type == HEVC_NAL_RASL_N) &&
2523 s->poc <= s->max_ra) {
2527 if (s->nal_unit_type == HEVC_NAL_RASL_R && s->poc > s->max_ra)
2528 s->max_ra = INT_MIN;
2531 if (s->sh.first_slice_in_pic_flag) {
2532 ret = hevc_frame_start(s);
2535 } else if (!s->ref) {
2536 av_log(s->avctx, AV_LOG_ERROR, "First slice in a frame missing.\n");
2540 if (s->nal_unit_type != s->first_nal_type) {
2541 av_log(s->avctx, AV_LOG_ERROR,
2542 "Non-matching NAL types of the VCL NALUs: %d %d\n",
2543 s->first_nal_type, s->nal_unit_type);
2544 return AVERROR_INVALIDDATA;
2547 if (!s->sh.dependent_slice_segment_flag &&
2548 s->sh.slice_type != HEVC_SLICE_I) {
2549 ret = ff_hevc_slice_rpl(s);
2551 av_log(s->avctx, AV_LOG_WARNING,
2552 "Error constructing the reference lists for the current slice.\n");
2557 if (s->sh.first_slice_in_pic_flag && s->avctx->hwaccel) {
2558 ret = s->avctx->hwaccel->start_frame(s->avctx, NULL, 0);
2563 if (s->avctx->hwaccel) {
2564 ret = s->avctx->hwaccel->decode_slice(s->avctx, nal->raw_data, nal->raw_size);
2568 ctb_addr_ts = hls_slice_data(s);
2569 if (ctb_addr_ts >= (s->ps.sps->ctb_width * s->ps.sps->ctb_height)) {
2571 if ((s->ps.pps->transquant_bypass_enable_flag ||
2572 (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) &&
2573 s->ps.sps->sao_enabled)
2574 restore_tqb_pixels(s);
2577 if (ctb_addr_ts < 0) {
2583 case HEVC_NAL_EOS_NUT:
2584 case HEVC_NAL_EOB_NUT:
2585 s->seq_decode = (s->seq_decode + 1) & 0xff;
2586 s->max_ra = INT_MAX;
2589 case HEVC_NAL_FD_NUT:
2592 av_log(s->avctx, AV_LOG_INFO,
2593 "Skipping NAL unit %d\n", s->nal_unit_type);
2598 if (s->avctx->err_recognition & AV_EF_EXPLODE)
2599 return ret;
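/* Split an input packet into NAL units and decode them in sequence; when
 * done, decoding progress on the current frame is reported so that other
 * frame threads can proceed. */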
2603 static int decode_nal_units(HEVCContext *s, const uint8_t *buf, int length)
2610 /* split the input packet into NAL units, so we know the upper bound on the
2611 * number of slices in the frame */
2612 ret = ff_h2645_packet_split(&s->pkt, buf, length, s->avctx, s->is_nalff,
2613 s->nal_length_size, s->avctx->codec_id);
2615 av_log(s->avctx, AV_LOG_ERROR,
2616 "Error splitting the input into NAL units.\n");
2620 for (i = 0; i < s->pkt.nb_nals; i++) {
2621 if (s->pkt.nals[i].type == HEVC_NAL_EOB_NUT ||
2622 s->pkt.nals[i].type == HEVC_NAL_EOS_NUT)
2626 /* decode the NAL units */
2627 for (i = 0; i < s->pkt.nb_nals; i++) {
2628 ret = decode_nal_unit(s, &s->pkt.nals[i]);
2630 av_log(s->avctx, AV_LOG_WARNING,
2631 "Error parsing NAL unit #%d.\n", i);
2638 ff_thread_report_progress(&s->ref->tf, INT_MAX, 0);
2643 static void print_md5(void *log_ctx, int level, uint8_t md5[16])
2646 for (i = 0; i < 16; i++)
2647 av_log(log_ctx, level, "%02"PRIx8, md5[i]);
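/* Recompute the MD5 of every decoded plane and compare it against the
 * checksum carried in the decoded picture hash SEI message; enabled by
 * AV_EF_CRCCHECK error recognition. */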
2650 static int verify_md5(HEVCContext *s, AVFrame *frame)
2652 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
2657 return AVERROR(EINVAL);
2659 pixel_shift = desc->comp[0].depth > 8;
2661 av_log(s->avctx, AV_LOG_DEBUG, "Verifying checksum for frame with POC %d: ",
2664 /* the checksums are LE, so we have to byteswap for >8bpp formats
2667 if (pixel_shift && !s->checksum_buf) {
2668 av_fast_malloc(&s->checksum_buf, &s->checksum_buf_size,
2669 FFMAX3(frame->linesize[0], frame->linesize[1],
2670 frame->linesize[2]));
2671 if (!s->checksum_buf)
2672 return AVERROR(ENOMEM);
2676 for (i = 0; frame->data[i]; i++) {
2677 int width = s->avctx->coded_width;
2678 int height = s->avctx->coded_height;
2679 int w = (i == 1 || i == 2) ? (width >> desc->log2_chroma_w) : width;
2680 int h = (i == 1 || i == 2) ? (height >> desc->log2_chroma_h) : height;
2683 av_md5_init(s->md5_ctx);
2684 for (j = 0; j < h; j++) {
2685 const uint8_t *src = frame->data[i] + j * frame->linesize[i];
2688 s->bdsp.bswap16_buf((uint16_t *) s->checksum_buf,
2689 (const uint16_t *) src, w);
2690 src = s->checksum_buf;
2693 av_md5_update(s->md5_ctx, src, w << pixel_shift);
2695 av_md5_final(s->md5_ctx, md5);
2697 if (!memcmp(md5, s->md5[i], 16)) {
2698 av_log (s->avctx, AV_LOG_DEBUG, "plane %d - correct ", i);
2699 print_md5(s->avctx, AV_LOG_DEBUG, md5);
2700 av_log (s->avctx, AV_LOG_DEBUG, "; ");
2702 av_log (s->avctx, AV_LOG_ERROR, "mismatching checksum of plane %d - ", i);
2703 print_md5(s->avctx, AV_LOG_ERROR, md5);
2704 av_log (s->avctx, AV_LOG_ERROR, " != ");
2705 print_md5(s->avctx, AV_LOG_ERROR, s->md5[i]);
2706 av_log (s->avctx, AV_LOG_ERROR, "\n");
2707 return AVERROR_INVALIDDATA;
2711 av_log(s->avctx, AV_LOG_DEBUG, "\n");
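/* Parse the codec extradata: either an hvcC configuration record with
 * length-prefixed parameter set arrays or plain Annex B NAL units. The
 * stream parameters are then exported from the first parsed SPS. */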
2716 static int hevc_decode_extradata(HEVCContext *s, uint8_t *buf, int length)
2718 AVCodecContext *avctx = s->avctx;
2722 bytestream2_init(&gb, buf, length);
2724 if (length > 3 && (buf[0] || buf[1] || buf[2] > 1)) {
2725 /* It seems the extradata is encoded as hvcC format.
2726 * Temporarily, we support configurationVersion==0 until 14496-15 3rd
2727 * is finalized. When finalized, configurationVersion will be 1 and we
2728 * can recognize hvcC by checking if avctx->extradata[0]==1 or not. */
2729 int i, j, num_arrays, nal_len_size;
2733 bytestream2_skip(&gb, 21);
2734 nal_len_size = (bytestream2_get_byte(&gb) & 3) + 1;
2735 num_arrays = bytestream2_get_byte(&gb);
2737 /* nal units in the hvcC always have length coded with 2 bytes,
2738 * so put a fake nal_length_size = 2 while parsing them */
2739 s->nal_length_size = 2;
2741 /* Decode nal units from hvcC. */
2742 for (i = 0; i < num_arrays; i++) {
2743 int type = bytestream2_get_byte(&gb) & 0x3f;
2744 int cnt = bytestream2_get_be16(&gb);
2746 for (j = 0; j < cnt; j++) {
2747 // +2 for the nal size field
2748 int nalsize = bytestream2_peek_be16(&gb) + 2;
2749 if (bytestream2_get_bytes_left(&gb) < nalsize) {
2750 av_log(s->avctx, AV_LOG_ERROR,
2751 "Invalid NAL unit size in extradata.\n");
2752 return AVERROR_INVALIDDATA;
2755 ret = decode_nal_units(s, gb.buffer, nalsize);
2757 av_log(avctx, AV_LOG_ERROR,
2758 "Decoding nal unit %d %d from hvcC failed\n",
2762 bytestream2_skip(&gb, nalsize);
2766 /* Now store right nal length size, that will be used to parse
2768 s->nal_length_size = nal_len_size;
2771 ret = decode_nal_units(s, buf, length);
2776 /* export stream parameters from the first SPS */
2777 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
2778 if (s->ps.sps_list[i]) {
2779 const HEVCSPS *sps = (const HEVCSPS*)s->ps.sps_list[i]->data;
2780 export_stream_params(s->avctx, &s->ps, sps);
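/* Top-level decode entry point: an empty packet drains buffered output
 * frames; otherwise new extradata is applied, the packet is decoded NAL
 * unit by NAL unit, the optional MD5 check is run and a finished frame is
 * returned if one is available. */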
2788 static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output,
2792 int new_extradata_size;
2793 uint8_t *new_extradata;
2794 HEVCContext *s = avctx->priv_data;
2797 ret = ff_hevc_output_frame(s, data, 1);
2805 new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
2806 &new_extradata_size);
2807 if (new_extradata && new_extradata_size > 0) {
2808 ret = hevc_decode_extradata(s, new_extradata, new_extradata_size);
2814 ret = decode_nal_units(s, avpkt->data, avpkt->size);
2818 if (avctx->hwaccel) {
2819 if (s->ref && avctx->hwaccel->end_frame(avctx) < 0)
2820 av_log(avctx, AV_LOG_ERROR,
2821 "hardware accelerator failed to decode picture\n");
2823 /* verify the SEI checksum */
2824 if (avctx->err_recognition & AV_EF_CRCCHECK && s->is_decoded &&
2826 ret = verify_md5(s, s->ref->frame);
2827 if (ret < 0 && avctx->err_recognition & AV_EF_EXPLODE) {
2828 ff_hevc_unref_frame(s, s->ref, ~0);
2835 if (s->is_decoded) {
2836 av_log(avctx, AV_LOG_DEBUG, "Decoded frame with POC %d.\n", s->poc);
2840 if (s->output_frame->buf[0]) {
2841 av_frame_move_ref(data, s->output_frame);
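/* Create references to all per-frame data of src (frame buffer, motion
 * vector and RPL tables, hwaccel private data) in dst; used to copy the
 * DPB between frame-threading contexts. */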
2848 static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
2850 int ret = ff_thread_ref_frame(&dst->tf, &src->tf);
2854 dst->tab_mvf_buf = av_buffer_ref(src->tab_mvf_buf);
2855 if (!dst->tab_mvf_buf)
2857 dst->tab_mvf = src->tab_mvf;
2859 dst->rpl_tab_buf = av_buffer_ref(src->rpl_tab_buf);
2860 if (!dst->rpl_tab_buf)
2862 dst->rpl_tab = src->rpl_tab;
2864 dst->rpl_buf = av_buffer_ref(src->rpl_buf);
2868 dst->poc = src->poc;
2869 dst->ctb_count = src->ctb_count;
2870 dst->flags = src->flags;
2871 dst->sequence = src->sequence;
2873 if (src->hwaccel_picture_private) {
2874 dst->hwaccel_priv_buf = av_buffer_ref(src->hwaccel_priv_buf);
2875 if (!dst->hwaccel_priv_buf)
2877 dst->hwaccel_picture_private = dst->hwaccel_priv_buf->data;
2882 ff_hevc_unref_frame(s, dst, ~0);
2883 return AVERROR(ENOMEM);
2886 static av_cold int hevc_decode_free(AVCodecContext *avctx)
2888 HEVCContext *s = avctx->priv_data;
2893 av_freep(&s->md5_ctx);
2895 av_frame_free(&s->tmp_frame);
2896 av_frame_free(&s->output_frame);
2898 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
2899 ff_hevc_unref_frame(s, &s->DPB[i], ~0);
2900 av_frame_free(&s->DPB[i].frame);
2903 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++)
2904 av_buffer_unref(&s->ps.vps_list[i]);
2905 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++)
2906 av_buffer_unref(&s->ps.sps_list[i]);
2907 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++)
2908 av_buffer_unref(&s->ps.pps_list[i]);
2910 ff_h2645_packet_uninit(&s->pkt);
2915 static av_cold int hevc_init_context(AVCodecContext *avctx)
2917 HEVCContext *s = avctx->priv_data;
2922 s->tmp_frame = av_frame_alloc();
2926 s->output_frame = av_frame_alloc();
2927 if (!s->output_frame)
2930 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
2931 s->DPB[i].frame = av_frame_alloc();
2932 if (!s->DPB[i].frame)
2934 s->DPB[i].tf.f = s->DPB[i].frame;
2937 s->max_ra = INT_MAX;
2939 s->md5_ctx = av_md5_alloc();
2943 ff_bswapdsp_init(&s->bdsp);
2945 s->context_initialized = 1;
2950 hevc_decode_free(avctx);
2951 return AVERROR(ENOMEM);
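/* Frame-threading state transfer: replicate the DPB and the VPS/SPS/PPS
 * lists from the source context and copy the sequence and POC bookkeeping
 * needed to continue decoding in this thread. */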
2954 static int hevc_update_thread_context(AVCodecContext *dst,
2955 const AVCodecContext *src)
2957 HEVCContext *s = dst->priv_data;
2958 HEVCContext *s0 = src->priv_data;
2961 if (!s->context_initialized) {
2962 ret = hevc_init_context(dst);
2967 for (i = 0; i < FF_ARRAY_ELEMS(s->DPB); i++) {
2968 ff_hevc_unref_frame(s, &s->DPB[i], ~0);
2969 if (s0->DPB[i].frame->buf[0]) {
2970 ret = hevc_ref_frame(s, &s->DPB[i], &s0->DPB[i]);
2976 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.vps_list); i++) {
2977 av_buffer_unref(&s->ps.vps_list[i]);
2978 if (s0->ps.vps_list[i]) {
2979 s->ps.vps_list[i] = av_buffer_ref(s0->ps.vps_list[i]);
2980 if (!s->ps.vps_list[i])
2981 return AVERROR(ENOMEM);
2985 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.sps_list); i++) {
2986 av_buffer_unref(&s->ps.sps_list[i]);
2987 if (s0->ps.sps_list[i]) {
2988 s->ps.sps_list[i] = av_buffer_ref(s0->ps.sps_list[i]);
2989 if (!s->ps.sps_list[i])
2990 return AVERROR(ENOMEM);
2994 for (i = 0; i < FF_ARRAY_ELEMS(s->ps.pps_list); i++) {
2995 av_buffer_unref(&s->ps.pps_list[i]);
2996 if (s0->ps.pps_list[i]) {
2997 s->ps.pps_list[i] = av_buffer_ref(s0->ps.pps_list[i]);
2998 if (!s->ps.pps_list[i])
2999 return AVERROR(ENOMEM);
3003 if (s->ps.sps != s0->ps.sps)
3004 ret = set_sps(s, s0->ps.sps, src->pix_fmt);
3006 s->seq_decode = s0->seq_decode;
3007 s->seq_output = s0->seq_output;
3008 s->pocTid0 = s0->pocTid0;
3009 s->max_ra = s0->max_ra;
3011 s->is_nalff = s0->is_nalff;
3012 s->nal_length_size = s0->nal_length_size;
3015 s->seq_decode = (s->seq_decode + 1) & 0xff;
3016 s->max_ra = INT_MAX;
3022 static av_cold int hevc_decode_init(AVCodecContext *avctx)
3024 HEVCContext *s = avctx->priv_data;
3027 avctx->internal->allocate_progress = 1;
3029 ret = hevc_init_context(avctx);
3033 if (avctx->extradata_size > 0 && avctx->extradata) {
3034 ret = hevc_decode_extradata(s, avctx->extradata, avctx->extradata_size);
3036 hevc_decode_free(avctx);
3044 static av_cold int hevc_init_thread_copy(AVCodecContext *avctx)
3046 HEVCContext *s = avctx->priv_data;
3049 memset(s, 0, sizeof(*s));
3051 ret = hevc_init_context(avctx);
3058 static void hevc_decode_flush(AVCodecContext *avctx)
3060 HEVCContext *s = avctx->priv_data;
3061 ff_hevc_flush_dpb(s);
3062 s->max_ra = INT_MAX;
3065 #define OFFSET(x) offsetof(HEVCContext, x)
3066 #define PAR (AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM)
3068 static const AVOption options[] = {
3069 { "apply_defdispwin", "Apply default display window from VUI", OFFSET(apply_defdispwin),
3070 AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, PAR },
3074 static const AVClass hevc_decoder_class = {
3075 .class_name = "HEVC decoder",
3076 .item_name = av_default_item_name,
3078 .version = LIBAVUTIL_VERSION_INT,
3081 AVCodec ff_hevc_decoder = {
3083 .long_name = NULL_IF_CONFIG_SMALL("HEVC (High Efficiency Video Coding)"),
3084 .type = AVMEDIA_TYPE_VIDEO,
3085 .id = AV_CODEC_ID_HEVC,
3086 .priv_data_size = sizeof(HEVCContext),
3087 .priv_class = &hevc_decoder_class,
3088 .init = hevc_decode_init,
3089 .close = hevc_decode_free,
3090 .decode = hevc_decode_frame,
3091 .flush = hevc_decode_flush,
3092 .update_thread_context = hevc_update_thread_context,
3093 .init_thread_copy = hevc_init_thread_copy,
3094 .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
3095 AV_CODEC_CAP_FRAME_THREADS,
3096 .profiles = NULL_IF_CONFIG_SMALL(ff_hevc_profiles),
3097 .caps_internal = FF_CODEC_CAP_EXPORTS_CROPPING | FF_CODEC_CAP_INIT_THREADSAFE,