/*
 * VP8 compatible video decoder
 *
 * Copyright (C) 2010 David Conrad
 * Copyright (C) 2010 Ronald S. Bultje
 * Copyright (C) 2010 Jason Garrett-Glaser
 * Copyright (C) 2012 Daniel Kang
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/imgutils.h"
#include "avcodec.h"
#include "internal.h"
#include "vp8.h"
#include "vp8data.h"
#include "rectangle.h"
#include "thread.h"
static void free_buffers(VP8Context *s)
{
    int i;
    if (s->thread_data)
        for (i = 0; i < MAX_THREADS; i++) {
            av_freep(&s->thread_data[i].filter_strength);
            av_freep(&s->thread_data[i].edge_emu_buffer);
        }
    av_freep(&s->thread_data);
    av_freep(&s->macroblocks_base);
    av_freep(&s->intra4x4_pred_mode_top);
    av_freep(&s->top_nnz);
    av_freep(&s->top_border);

    s->macroblocks = NULL;
}
static int vp8_alloc_frame(VP8Context *s, VP8Frame *f, int ref)
{
    int ret;
    if ((ret = ff_thread_get_buffer(s->avctx, &f->tf,
                                    ref ? AV_GET_BUFFER_FLAG_REF : 0)) < 0)
        return ret;
    if (!(f->seg_map = av_buffer_allocz(s->mb_width * s->mb_height))) {
        ff_thread_release_buffer(s->avctx, &f->tf);
        return AVERROR(ENOMEM);
    }
    return 0;
}
static void vp8_release_frame(VP8Context *s, VP8Frame *f)
{
    av_buffer_unref(&f->seg_map);
    ff_thread_release_buffer(s->avctx, &f->tf);
}
static int vp8_ref_frame(VP8Context *s, VP8Frame *dst, VP8Frame *src)
{
    int ret;

    vp8_release_frame(s, dst);

    if ((ret = ff_thread_ref_frame(&dst->tf, &src->tf)) < 0)
        return ret;
    if (src->seg_map &&
        !(dst->seg_map = av_buffer_ref(src->seg_map))) {
        vp8_release_frame(s, dst);
        return AVERROR(ENOMEM);
    }

    return 0;
}
static void vp8_decode_flush_impl(AVCodecContext *avctx, int free_mem)
{
    VP8Context *s = avctx->priv_data;
    int i;

    for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
        vp8_release_frame(s, &s->frames[i]);
    memset(s->framep, 0, sizeof(s->framep));

    if (free_mem)
        free_buffers(s);
}
static void vp8_decode_flush(AVCodecContext *avctx)
{
    vp8_decode_flush_impl(avctx, 0);
}
static int update_dimensions(VP8Context *s, int width, int height)
{
    AVCodecContext *avctx = s->avctx;
    int i;

    if (width  != s->avctx->width || ((width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) && s->macroblocks_base ||
        height != s->avctx->height) {
        if (av_image_check_size(width, height, 0, s->avctx))
            return AVERROR_INVALIDDATA;

        vp8_decode_flush_impl(s->avctx, 1);

        avcodec_set_dimensions(s->avctx, width, height);
    }

    s->mb_width  = (s->avctx->coded_width +15) / 16;
    s->mb_height = (s->avctx->coded_height+15) / 16;

    s->mb_layout = (avctx->active_thread_type == FF_THREAD_SLICE) && (FFMIN(s->num_coeff_partitions, avctx->thread_count) > 1);
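    /* A note on the two layouts (an interpretation of the code, not a spec
     * rule): with frame threading or a single thread, rows are decoded in
     * order, so mode/MV storage can overlap row-on-row and the top neighbour
     * of mb ends up at mb+2 (hence mb_width + mb_height*2 + 1 entries). With
     * sliced threading several rows are in flight at once, so a full 2-D
     * array with guard entries is allocated and the top neighbour sits at
     * mb - (mb_width+1). */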
    if (!s->mb_layout) { // Frame threading and one thread
        s->macroblocks_base       = av_mallocz((s->mb_width+s->mb_height*2+1)*sizeof(*s->macroblocks));
        s->intra4x4_pred_mode_top = av_mallocz(s->mb_width*4);
    }
    else // Sliced threading
        s->macroblocks_base = av_mallocz((s->mb_width+2)*(s->mb_height+2)*sizeof(*s->macroblocks));
    s->top_nnz     = av_mallocz(s->mb_width*sizeof(*s->top_nnz));
    s->top_border  = av_mallocz((s->mb_width+1)*sizeof(*s->top_border));
    s->thread_data = av_mallocz(MAX_THREADS*sizeof(VP8ThreadData));

    for (i = 0; i < MAX_THREADS; i++) {
        s->thread_data[i].filter_strength = av_mallocz(s->mb_width*sizeof(*s->thread_data[0].filter_strength));
#if HAVE_THREADS
        pthread_mutex_init(&s->thread_data[i].lock, NULL);
        pthread_cond_init(&s->thread_data[i].cond, NULL);
#endif
    }

    if (!s->macroblocks_base || !s->top_nnz || !s->top_border ||
        (!s->intra4x4_pred_mode_top && !s->mb_layout))
        return AVERROR(ENOMEM);

    s->macroblocks = s->macroblocks_base + 1;

    return 0;
}
static void parse_segment_info(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    s->segmentation.update_map = vp8_rac_get(c);

    if (vp8_rac_get(c)) { // update segment feature data
        s->segmentation.absolute_vals = vp8_rac_get(c);

        for (i = 0; i < 4; i++)
            s->segmentation.base_quant[i]   = vp8_rac_get_sint(c, 7);

        for (i = 0; i < 4; i++)
            s->segmentation.filter_level[i] = vp8_rac_get_sint(c, 6);
    }
    if (s->segmentation.update_map)
        for (i = 0; i < 3; i++)
            s->prob->segmentid[i] = vp8_rac_get(c) ? vp8_rac_get_uint(c, 8) : 255;
}
static void update_lf_deltas(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i;

    for (i = 0; i < 4; i++) {
        if (vp8_rac_get(c)) {
            s->lf_delta.ref[i] = vp8_rac_get_uint(c, 6);

            if (vp8_rac_get(c))
                s->lf_delta.ref[i] = -s->lf_delta.ref[i];
        }
    }

    for (i = MODE_I4x4; i <= VP8_MVMODE_SPLIT; i++) {
        if (vp8_rac_get(c)) {
            s->lf_delta.mode[i] = vp8_rac_get_uint(c, 6);

            if (vp8_rac_get(c))
                s->lf_delta.mode[i] = -s->lf_delta.mode[i];
        }
    }
}
static int setup_partitions(VP8Context *s, const uint8_t *buf, int buf_size)
{
    const uint8_t *sizes = buf;
    int i;

    s->num_coeff_partitions = 1 << vp8_rac_get_uint(&s->c, 2);

    buf      += 3*(s->num_coeff_partitions-1);
    buf_size -= 3*(s->num_coeff_partitions-1);
    if (buf_size < 0)
        return -1;

    for (i = 0; i < s->num_coeff_partitions-1; i++) {
        int size = AV_RL24(sizes + 3*i);
        if (buf_size - size < 0)
            return -1;

        ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, size);
        buf      += size;
        buf_size -= size;
    }
    ff_vp56_init_range_decoder(&s->coeff_partition[i], buf, buf_size);

    return 0;
}
static void get_quants(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;
    int i, base_qi;

    int yac_qi     = vp8_rac_get_uint(c, 7);
    int ydc_delta  = vp8_rac_get_sint(c, 4);
    int y2dc_delta = vp8_rac_get_sint(c, 4);
    int y2ac_delta = vp8_rac_get_sint(c, 4);
    int uvdc_delta = vp8_rac_get_sint(c, 4);
    int uvac_delta = vp8_rac_get_sint(c, 4);

    for (i = 0; i < 4; i++) {
        if (s->segmentation.enabled) {
            base_qi = s->segmentation.base_quant[i];
            if (!s->segmentation.absolute_vals)
                base_qi += yac_qi;
        } else
            base_qi = yac_qi;

        s->qmat[i].luma_qmul[0]    = vp8_dc_qlookup[av_clip_uintp2(base_qi + ydc_delta , 7)];
        s->qmat[i].luma_qmul[1]    = vp8_ac_qlookup[av_clip_uintp2(base_qi             , 7)];
        s->qmat[i].luma_dc_qmul[0] = 2 * vp8_dc_qlookup[av_clip_uintp2(base_qi + y2dc_delta, 7)];
        /* 101581>>16 is equivalent to 155/100 */
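        /* e.g. an AC lookup value of 60 gives (101581*60)>>16 = 93, i.e.
         * floor(60*155/100); the FFMAX below then enforces a minimum of 8. */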
        s->qmat[i].luma_dc_qmul[1] = (101581 * vp8_ac_qlookup[av_clip_uintp2(base_qi + y2ac_delta, 7)]) >> 16;
        s->qmat[i].chroma_qmul[0]  = vp8_dc_qlookup[av_clip_uintp2(base_qi + uvdc_delta, 7)];
        s->qmat[i].chroma_qmul[1]  = vp8_ac_qlookup[av_clip_uintp2(base_qi + uvac_delta, 7)];

        s->qmat[i].luma_dc_qmul[1] = FFMAX(s->qmat[i].luma_dc_qmul[1], 8);
        s->qmat[i].chroma_qmul[0]  = FFMIN(s->qmat[i].chroma_qmul[0], 132);
    }
}
/**
 * Determine which buffers golden and altref should be updated with after this frame.
 * The spec isn't clear here, so I'm going by my understanding of what libvpx does.
 *
 * Intra frames update all 3 references.
 * Inter frames update VP56_FRAME_PREVIOUS if the update_last flag is set.
 * If the update (golden|altref) flag is set, it's updated with the current frame
 *      if update_last is set, and VP56_FRAME_PREVIOUS otherwise.
 * If the flag is not set, the number read means:
 *      0: no update
 *      1: VP56_FRAME_PREVIOUS
 *      2: update golden with altref, or update altref with golden
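 *
 * Example: on an inter frame with the update_golden flag unset, reading a 2
 * from the bitstream refreshes golden from altref (VP56_FRAME_GOLDEN2).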
 */
static VP56Frame ref_to_update(VP8Context *s, int update, VP56Frame ref)
{
    VP56RangeCoder *c = &s->c;

    if (update)
        return VP56_FRAME_CURRENT;

    switch (vp8_rac_get_uint(c, 2)) {
    case 1:
        return VP56_FRAME_PREVIOUS;
    case 2:
        return (ref == VP56_FRAME_GOLDEN) ? VP56_FRAME_GOLDEN2 : VP56_FRAME_GOLDEN;
    }
    return VP56_FRAME_NONE;
}
static void update_refs(VP8Context *s)
{
    VP56RangeCoder *c = &s->c;

    int update_golden = vp8_rac_get(c);
    int update_altref = vp8_rac_get(c);

    s->update_golden = ref_to_update(s, update_golden, VP56_FRAME_GOLDEN);
    s->update_altref = ref_to_update(s, update_altref, VP56_FRAME_GOLDEN2);
}
static int decode_frame_header(VP8Context *s, const uint8_t *buf, int buf_size)
{
    VP56RangeCoder *c = &s->c;
    int header_size, hscale, vscale, i, j, k, l, m, ret;
    int width  = s->avctx->width;
    int height = s->avctx->height;

    s->keyframe  = !(buf[0] & 1);
    s->profile   =  (buf[0]>>1) & 7;
    s->invisible = !(buf[0] & 0x10);
    header_size  = AV_RL24(buf) >> 5;
    buf      += 3;
    buf_size -= 3;

    if (s->profile > 3)
        av_log(s->avctx, AV_LOG_WARNING, "Unknown profile %d\n", s->profile);

    if (!s->profile)
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_epel_pixels_tab, sizeof(s->put_pixels_tab));
    else // profile 1-3 use bilinear, 4+ aren't defined so whatever
        memcpy(s->put_pixels_tab, s->vp8dsp.put_vp8_bilinear_pixels_tab, sizeof(s->put_pixels_tab));

    if (header_size > buf_size - 7*s->keyframe) {
        av_log(s->avctx, AV_LOG_ERROR, "Header size larger than data provided\n");
        return AVERROR_INVALIDDATA;
    }

    if (s->keyframe) {
        if (AV_RL24(buf) != 0x2a019d) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid start code 0x%x\n", AV_RL24(buf));
            return AVERROR_INVALIDDATA;
        }
        width  = AV_RL16(buf+3) & 0x3fff;
        height = AV_RL16(buf+5) & 0x3fff;
        hscale = buf[4] >> 6;
        vscale = buf[6] >> 6;
        buf      += 7;
        buf_size -= 7;

        if (hscale || vscale)
            avpriv_request_sample(s->avctx, "Upscaling");

        s->update_golden = s->update_altref = VP56_FRAME_CURRENT;
        for (i = 0; i < 4; i++)
            for (j = 0; j < 16; j++)
                memcpy(s->prob->token[i][j], vp8_token_default_probs[i][vp8_coeff_band[j]],
                       sizeof(s->prob->token[i][j]));
        memcpy(s->prob->pred16x16, vp8_pred16x16_prob_inter, sizeof(s->prob->pred16x16));
        memcpy(s->prob->pred8x8c , vp8_pred8x8c_prob_inter , sizeof(s->prob->pred8x8c));
        memcpy(s->prob->mvc      , vp8_mv_default_prob     , sizeof(s->prob->mvc));
        memset(&s->segmentation, 0, sizeof(s->segmentation));
        memset(&s->lf_delta, 0, sizeof(s->lf_delta));
    }

    ff_vp56_init_range_decoder(c, buf, header_size);
    buf      += header_size;
    buf_size -= header_size;

    if (s->keyframe) {
        if (vp8_rac_get(c))
            av_log(s->avctx, AV_LOG_WARNING, "Unspecified colorspace\n");
        vp8_rac_get(c); // whether we can skip clamping in dsp functions
    }

    if ((s->segmentation.enabled = vp8_rac_get(c)))
        parse_segment_info(s);
    else
        s->segmentation.update_map = 0; // FIXME: move this to some init function?

    s->filter.simple    = vp8_rac_get(c);
    s->filter.level     = vp8_rac_get_uint(c, 6);
    s->filter.sharpness = vp8_rac_get_uint(c, 3);

    if ((s->lf_delta.enabled = vp8_rac_get(c)))
        if (vp8_rac_get(c))
            update_lf_deltas(s);

    if (setup_partitions(s, buf, buf_size)) {
        av_log(s->avctx, AV_LOG_ERROR, "Invalid partitions\n");
        return AVERROR_INVALIDDATA;
    }

    if (!s->macroblocks_base || /* first frame */
        width != s->avctx->width || height != s->avctx->height || (width+15)/16 != s->mb_width || (height+15)/16 != s->mb_height) {
        if ((ret = update_dimensions(s, width, height)) < 0)
            return ret;
    }

    get_quants(s);

    if (!s->keyframe) {
        update_refs(s);
        s->sign_bias[VP56_FRAME_GOLDEN]               = vp8_rac_get(c);
        s->sign_bias[VP56_FRAME_GOLDEN2 /* altref */] = vp8_rac_get(c);
    }

    // if we aren't saving this frame's probabilities for future frames,
    // make a copy of the current probabilities
    if (!(s->update_probabilities = vp8_rac_get(c)))
        s->prob[1] = s->prob[0];

    s->update_last = s->keyframe || vp8_rac_get(c);

    for (i = 0; i < 4; i++)
        for (j = 0; j < 8; j++)
            for (k = 0; k < 3; k++)
                for (l = 0; l < NUM_DCT_TOKENS-1; l++)
                    if (vp56_rac_get_prob_branchy(c, vp8_token_update_probs[i][j][k][l])) {
                        int prob = vp8_rac_get_uint(c, 8);
                        for (m = 0; vp8_coeff_band_indexes[j][m] >= 0; m++)
                            s->prob->token[i][vp8_coeff_band_indexes[j][m]][k][l] = prob;
                    }

    if ((s->mbskip_enabled = vp8_rac_get(c)))
        s->prob->mbskip = vp8_rac_get_uint(c, 8);

    if (!s->keyframe) {
        s->prob->intra  = vp8_rac_get_uint(c, 8);
        s->prob->last   = vp8_rac_get_uint(c, 8);
        s->prob->golden = vp8_rac_get_uint(c, 8);

        if (vp8_rac_get(c))
            for (i = 0; i < 4; i++)
                s->prob->pred16x16[i] = vp8_rac_get_uint(c, 8);
        if (vp8_rac_get(c))
            for (i = 0; i < 3; i++)
                s->prob->pred8x8c[i]  = vp8_rac_get_uint(c, 8);

        // 17.2 MV probability update
        for (i = 0; i < 2; i++)
            for (j = 0; j < 19; j++)
                if (vp56_rac_get_prob_branchy(c, vp8_mv_update_prob[i][j]))
                    s->prob->mvc[i][j] = vp8_rac_get_nn(c);
    }

    return 0;
}
static av_always_inline void clamp_mv(VP8Context *s, VP56mv *dst, const VP56mv *src)
{
    dst->x = av_clip(src->x, s->mv_min.x, s->mv_max.x);
    dst->y = av_clip(src->y, s->mv_min.y, s->mv_max.y);
}
/**
 * Motion vector coding, 17.1.
 */
static int read_mv_component(VP56RangeCoder *c, const uint8_t *p)
{
    int bit, x = 0;

    if (vp56_rac_get_prob_branchy(c, p[0])) {
        int i;

        for (i = 0; i < 3; i++)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
        for (i = 9; i > 3; i--)
            x += vp56_rac_get_prob(c, p[9 + i]) << i;
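        /* Bit 3 is implicit when bits 4..9 are all zero: this branch only
         * codes magnitudes >= 8, so in that case bit 3 must be set and is
         * not transmitted (hence the !(x & 0xFFF0) test below). */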
        if (!(x & 0xFFF0) || vp56_rac_get_prob(c, p[12]))
            x += 8;
    } else {
        // small_mvtree
        const uint8_t *ps = p+2;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + 3*bit;
        x  += 4*bit;
        bit = vp56_rac_get_prob(c, *ps);
        ps += 1 + bit;
        x  += 2*bit;
        x  += vp56_rac_get_prob(c, *ps);
    }

    return (x && vp56_rac_get_prob(c, p[1])) ? -x : x;
}
static av_always_inline
const uint8_t *get_submv_prob(uint32_t left, uint32_t top)
{
    if (left == top)
        return vp8_submv_prob[4-!!left];
    if (!top)
        return vp8_submv_prob[2];
    return vp8_submv_prob[1-!!left];
}
/**
 * Split motion vector prediction, 16.4.
 * @returns the number of motion vectors parsed (2, 4 or 16)
 */
static av_always_inline
int decode_splitmvs(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb, int layout)
{
    int part_idx;
    int n, num;
    VP8Macroblock *top_mb;
    VP8Macroblock *left_mb = &mb[-1];
    const uint8_t *mbsplits_left = vp8_mbsplits[left_mb->partitioning],
                  *mbsplits_top,
                  *mbsplits_cur, *firstidx;
    VP56mv *top_mv;
    VP56mv *left_mv = left_mb->bmv;
    VP56mv *cur_mv  = mb->bmv;

    if (!layout) // layout is inlined, s->mb_layout is not
        top_mb = &mb[2];
    else
        top_mb = &mb[-s->mb_width-1];
    mbsplits_top = vp8_mbsplits[top_mb->partitioning];
    top_mv       = top_mb->bmv;

    if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[0])) {
        if (vp56_rac_get_prob_branchy(c, vp8_mbsplit_prob[1])) {
            part_idx = VP8_SPLITMVMODE_16x8 + vp56_rac_get_prob(c, vp8_mbsplit_prob[2]);
        } else {
            part_idx = VP8_SPLITMVMODE_8x8;
        }
    } else {
        part_idx = VP8_SPLITMVMODE_4x4;
    }

    num          = vp8_mbsplit_count[part_idx];
    mbsplits_cur = vp8_mbsplits[part_idx];
    firstidx     = vp8_mbfirstidx[part_idx];
    mb->partitioning = part_idx;

    for (n = 0; n < num; n++) {
        int k = firstidx[n];
        uint32_t left, above;
        const uint8_t *submv_prob;

        if (!(k & 3))
            left = AV_RN32A(&left_mv[mbsplits_left[k + 3]]);
        else
            left = AV_RN32A(&cur_mv[mbsplits_cur[k - 1]]);
        if (k <= 3)
            above = AV_RN32A(&top_mv[mbsplits_top[k + 12]]);
        else
            above = AV_RN32A(&cur_mv[mbsplits_cur[k - 4]]);

        submv_prob = get_submv_prob(left, above);

        if (vp56_rac_get_prob_branchy(c, submv_prob[0])) {
            if (vp56_rac_get_prob_branchy(c, submv_prob[1])) {
                if (vp56_rac_get_prob_branchy(c, submv_prob[2])) {
                    mb->bmv[n].y = mb->mv.y + read_mv_component(c, s->prob->mvc[0]);
                    mb->bmv[n].x = mb->mv.x + read_mv_component(c, s->prob->mvc[1]);
                } else {
                    AV_ZERO32(&mb->bmv[n]);
                }
            } else {
                AV_WN32A(&mb->bmv[n], above);
            }
        } else {
            AV_WN32A(&mb->bmv[n], left);
        }
    }

    return num;
}
static av_always_inline
void decode_mvs(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int layout)
{
    VP8Macroblock *mb_edge[3] = { 0      /* top */,
                                  mb - 1 /* left */,
                                  0      /* top-left */ };
    enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
    enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
    int idx = CNT_ZERO;
    int cur_sign_bias = s->sign_bias[mb->ref_frame];
    int8_t *sign_bias = s->sign_bias;
    VP56mv near_mv[4];
    uint8_t cnt[4] = { 0 };
    VP56RangeCoder *c = &s->c;

    if (!layout) { // layout is inlined (s->mb_layout is not)
        mb_edge[0] = mb + 2;
        mb_edge[2] = mb + 1;
    }
    else {
        mb_edge[0] = mb - s->mb_width-1;
        mb_edge[2] = mb - s->mb_width-2;
    }

    AV_ZERO32(&near_mv[0]);
    AV_ZERO32(&near_mv[1]);
    AV_ZERO32(&near_mv[2]);

    /* Process MB on top, left and top-left */
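    /* A worked example of the SWAR negate inside MV_EDGE_CHECK (assuming x
     * lives in the low halfword): mv = 0x00030002 (y=3, x=2) -> ~mv =
     * 0xfffcfffd; then ((0x7ffcfffd + 0x00010001) ^ 0x80008000) = 0xfffdfffe,
     * i.e. y=-3, x=-2, negating both halfwords without carry between them. */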
#define MV_EDGE_CHECK(n)\
{\
    VP8Macroblock *edge = mb_edge[n];\
    int edge_ref = edge->ref_frame;\
    if (edge_ref != VP56_FRAME_CURRENT) {\
        uint32_t mv = AV_RN32A(&edge->mv);\
        if (mv) {\
            if (cur_sign_bias != sign_bias[edge_ref]) {\
                /* SWAR negate of the values in mv. */\
                mv = ~mv;\
                mv = ((mv&0x7fff7fff) + 0x00010001) ^ (mv&0x80008000);\
            }\
            if (!n || mv != AV_RN32A(&near_mv[idx]))\
                AV_WN32A(&near_mv[++idx], mv);\
            cnt[idx] += 1 + (n != 2);\
        } else {\
            cnt[CNT_ZERO] += 1 + (n != 2);\
        }\
    }\
}

    MV_EDGE_CHECK(0)
    MV_EDGE_CHECK(1)
    MV_EDGE_CHECK(2)
    mb->partitioning = VP8_SPLITMVMODE_NONE;
    if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
        mb->mode = VP8_MVMODE_MV;

        /* If we have three distinct MVs, merge first and last if they're the same */
        if (cnt[CNT_SPLITMV] && AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
            cnt[CNT_NEAREST] += 1;

        /* Swap near and nearest if necessary */
        if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
            FFSWAP(uint8_t,     cnt[CNT_NEAREST],     cnt[CNT_NEAR]);
            FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
        }

        if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
            if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {

                /* Choose the best mv out of 0,0 and the nearest mv */
                clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);

                cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode    == VP8_MVMODE_SPLIT) +
                                    (mb_edge[VP8_EDGE_TOP]->mode     == VP8_MVMODE_SPLIT)) * 2 +
                                    (mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);

                if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
                    mb->mode = VP8_MVMODE_SPLIT;
                    mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout) - 1];
                } else {
                    mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
                    mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
                    mb->bmv[0] = mb->mv;
                }
            } else {
                clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
                mb->bmv[0] = mb->mv;
            }
        } else {
            clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
            mb->bmv[0] = mb->mv;
        }
    } else {
        mb->mode = VP8_MVMODE_ZERO;
        AV_ZERO32(&mb->mv);
        mb->bmv[0] = mb->mv;
    }
}
static av_always_inline
void decode_intra4x4_modes(VP8Context *s, VP56RangeCoder *c, VP8Macroblock *mb,
                           int mb_x, int keyframe, int layout)
{
    uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;

    if (layout == 1) {
        VP8Macroblock *mb_top = mb - s->mb_width - 1;
        memcpy(mb->intra4x4_pred_mode_top, mb_top->intra4x4_pred_mode_top, 4);
    }
    if (keyframe) {
        int x, y;
        uint8_t *top;
        uint8_t* const left = s->intra4x4_pred_mode_left;
        if (layout == 1)
            top = mb->intra4x4_pred_mode_top;
        else
            top = s->intra4x4_pred_mode_top + 4 * mb_x;
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                const uint8_t *ctx;
                ctx = vp8_pred4x4_prob_intra[top[x]][left[y]];
                *intra4x4 = vp8_rac_get_tree(c, vp8_pred4x4_tree, ctx);
                left[y] = top[x] = *intra4x4;
                intra4x4++;
            }
        }
    } else {
        int i;
        for (i = 0; i < 16; i++)
            intra4x4[i] = vp8_rac_get_tree(c, vp8_pred4x4_tree, vp8_pred4x4_prob_inter);
    }
}
static av_always_inline
void decode_mb_mode(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y,
                    uint8_t *segment, uint8_t *ref, int layout)
{
    VP56RangeCoder *c = &s->c;

    if (s->segmentation.update_map) {
        int bit  = vp56_rac_get_prob(c, s->prob->segmentid[0]);
        *segment = vp56_rac_get_prob(c, s->prob->segmentid[1+bit]) + 2*bit;
    } else if (s->segmentation.enabled)
        *segment = ref ? *ref : *segment;
    mb->segment = *segment;

    mb->skip = s->mbskip_enabled ? vp56_rac_get_prob(c, s->prob->mbskip) : 0;

    if (s->keyframe) {
        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_intra, vp8_pred16x16_prob_intra);

        if (mb->mode == MODE_I4x4) {
            decode_intra4x4_modes(s, c, mb, mb_x, 1, layout);
        } else {
            const uint32_t modes = vp8_pred4x4_mode[mb->mode] * 0x01010101u;
            if (s->mb_layout == 1)
                AV_WN32A(mb->intra4x4_pred_mode_top, modes);
            else
                AV_WN32A(s->intra4x4_pred_mode_top + 4 * mb_x, modes);
            AV_WN32A( s->intra4x4_pred_mode_left, modes);
        }

        mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, vp8_pred8x8c_prob_intra);
        mb->ref_frame = VP56_FRAME_CURRENT;
    } else if (vp56_rac_get_prob_branchy(c, s->prob->intra)) {
        // inter MB, 16.2
        if (vp56_rac_get_prob_branchy(c, s->prob->last))
            mb->ref_frame = vp56_rac_get_prob(c, s->prob->golden) ?
                VP56_FRAME_GOLDEN2 /* altref */ : VP56_FRAME_GOLDEN;
        else
            mb->ref_frame = VP56_FRAME_PREVIOUS;
        s->ref_count[mb->ref_frame-1]++;

        // motion vectors, 16.3
        decode_mvs(s, mb, mb_x, mb_y, layout);
    } else {
        // intra MB, 16.1
        mb->mode = vp8_rac_get_tree(c, vp8_pred16x16_tree_inter, s->prob->pred16x16);

        if (mb->mode == MODE_I4x4)
            decode_intra4x4_modes(s, c, mb, mb_x, 0, layout);

        mb->chroma_pred_mode = vp8_rac_get_tree(c, vp8_pred8x8c_tree, s->prob->pred8x8c);
        mb->ref_frame = VP56_FRAME_CURRENT;
        mb->partitioning = VP8_SPLITMVMODE_NONE;
        AV_ZERO32(&mb->bmv[0]);
    }
}
#ifndef decode_block_coeffs_internal
/**
 * @param r arithmetic bitstream reader context
 * @param block destination for block coefficients
 * @param probs probabilities to use when reading trees from the bitstream
 * @param i initial coeff index, 0 unless a separate DC block is coded
 * @param qmul array holding the dc/ac dequant factor at position 0/1
 * @return 0 if no coeffs were decoded
 *         otherwise, the index of the last coeff decoded plus one
 */
static int decode_block_coeffs_internal(VP56RangeCoder *r, int16_t block[16],
                                        uint8_t probs[16][3][NUM_DCT_TOKENS-1],
                                        int i, uint8_t *token_prob, int16_t qmul[2])
{
    VP56RangeCoder c = *r;
    goto skip_eob;
    do {
        int coeff;
        if (!vp56_rac_get_prob_branchy(&c, token_prob[0]))   // DCT_EOB
            break;

skip_eob:
        if (!vp56_rac_get_prob_branchy(&c, token_prob[1])) { // DCT_0
            if (++i == 16)
                break; // invalid input; blocks should end with EOB
            token_prob = probs[i][0];
            goto skip_eob;
        }

        if (!vp56_rac_get_prob_branchy(&c, token_prob[2])) { // DCT_1
            coeff = 1;
            token_prob = probs[i+1][1];
        } else {
            if (!vp56_rac_get_prob_branchy(&c, token_prob[3])) { // DCT 2,3,4
                coeff = vp56_rac_get_prob_branchy(&c, token_prob[4]);
                if (coeff)
                    coeff += vp56_rac_get_prob(&c, token_prob[5]);
                coeff += 2;
            } else {
                // DCT_CAT*
                if (!vp56_rac_get_prob_branchy(&c, token_prob[6])) {
                    if (!vp56_rac_get_prob_branchy(&c, token_prob[7])) { // DCT_CAT1
                        coeff = 5 + vp56_rac_get_prob(&c, vp8_dct_cat1_prob[0]);
                    } else { // DCT_CAT2
                        coeff  = 7;
                        coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[0]) << 1;
                        coeff += vp56_rac_get_prob(&c, vp8_dct_cat2_prob[1]);
                    }
                } else { // DCT_CAT3 and up
                    int a = vp56_rac_get_prob(&c, token_prob[8]);
                    int b = vp56_rac_get_prob(&c, token_prob[9+a]);
                    int cat = (a<<1) + b;
                    coeff  = 3 + (8<<cat);
                    coeff += vp8_rac_get_coeff(&c, ff_vp8_dct_cat_prob[cat]);
                }
            }
            token_prob = probs[i+1][2];
        }
        block[zigzag_scan[i]] = (vp8_rac_get(&c) ? -coeff : coeff) * qmul[!!i];
    } while (++i < 16);

    *r = c;
    return i;
}
#endif
/**
 * @param c arithmetic bitstream reader context
 * @param block destination for block coefficients
 * @param probs probabilities to use when reading trees from the bitstream
 * @param i initial coeff index, 0 unless a separate DC block is coded
 * @param zero_nhood the initial prediction context for number of surrounding
 *                   all-zero blocks (only left/top, so 0-2)
 * @param qmul array holding the dc/ac dequant factor at position 0/1
 * @return 0 if no coeffs were decoded
 *         otherwise, the index of the last coeff decoded plus one
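 *
 * The first DCT_EOB check is done in this always-inlined wrapper so that the
 * common all-zero block returns before decode_block_coeffs_internal copies
 * the range coder into a local (a reading of the code, not a spec point).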
 */
static av_always_inline
int decode_block_coeffs(VP56RangeCoder *c, int16_t block[16],
                        uint8_t probs[16][3][NUM_DCT_TOKENS-1],
                        int i, int zero_nhood, int16_t qmul[2])
{
    uint8_t *token_prob = probs[i][zero_nhood];
    if (!vp56_rac_get_prob_branchy(c, token_prob[0]))   // DCT_EOB
        return 0;
    return decode_block_coeffs_internal(c, block, probs, i, token_prob, qmul);
}
static av_always_inline
void decode_mb_coeffs(VP8Context *s, VP8ThreadData *td, VP56RangeCoder *c, VP8Macroblock *mb,
                      uint8_t t_nnz[9], uint8_t l_nnz[9])
{
    int i, x, y, luma_start = 0, luma_ctx = 3;
    int nnz_pred, nnz, nnz_total = 0;
    int segment = mb->segment;
    int block_dc = 0;

    if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
        nnz_pred = t_nnz[8] + l_nnz[8];

        // decode DC values and do hadamard
        nnz = decode_block_coeffs(c, td->block_dc, s->prob->token[1], 0, nnz_pred,
                                  s->qmat[segment].luma_dc_qmul);
        l_nnz[8] = t_nnz[8] = !!nnz;
        if (nnz) {
            nnz_total += nnz;
            block_dc = 1;
            if (nnz == 1)
                s->vp8dsp.vp8_luma_dc_wht_dc(td->block, td->block_dc);
            else
                s->vp8dsp.vp8_luma_dc_wht(td->block, td->block_dc);
        }
        luma_start = 1;
        luma_ctx = 0;
    }

    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++) {
            nnz_pred = l_nnz[y] + t_nnz[x];
            nnz = decode_block_coeffs(c, td->block[y][x], s->prob->token[luma_ctx], luma_start,
                                      nnz_pred, s->qmat[segment].luma_qmul);
            // nnz+block_dc may be one more than the actual last index, but we don't care
            td->non_zero_count_cache[y][x] = nnz + block_dc;
            t_nnz[x] = l_nnz[y] = !!nnz;
            nnz_total += nnz;
        }

    // TODO: what to do about dimensions? 2nd dim for luma is x,
    // but for chroma it's (y<<1)|x
    for (i = 4; i < 6; i++)
        for (y = 0; y < 2; y++)
            for (x = 0; x < 2; x++) {
                nnz_pred = l_nnz[i+2*y] + t_nnz[i+2*x];
                nnz = decode_block_coeffs(c, td->block[i][(y<<1)+x], s->prob->token[2], 0,
                                          nnz_pred, s->qmat[segment].chroma_qmul);
                td->non_zero_count_cache[i][(y<<1)+x] = nnz;
                t_nnz[i+2*x] = l_nnz[i+2*y] = !!nnz;
                nnz_total += nnz;
            }

    // if there were no coded coeffs despite the macroblock not being marked skip,
    // we MUST not do the inner loop filter and should not do IDCT
    // Since skip isn't used for bitstream prediction, just manually set it.
    if (!nnz_total)
        mb->skip = 1;
}
static av_always_inline
void backup_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                      int linesize, int uvlinesize, int simple)
{
    AV_COPY128(top_border, src_y + 15*linesize);
    if (!simple) {
        AV_COPY64(top_border+16, src_cb + 7*uvlinesize);
        AV_COPY64(top_border+24, src_cr + 7*uvlinesize);
    }
}

static av_always_inline
void xchg_mb_border(uint8_t *top_border, uint8_t *src_y, uint8_t *src_cb, uint8_t *src_cr,
                    int linesize, int uvlinesize, int mb_x, int mb_y, int mb_width,
                    int simple, int xchg)
{
    uint8_t *top_border_m1 = top_border-32;     // for TL prediction
    src_y  -= linesize;
    src_cb -= uvlinesize;
    src_cr -= uvlinesize;

#define XCHG(a,b,xchg) do {             \
        if (xchg) AV_SWAP64(b,a);       \
        else      AV_COPY64(b,a);       \
    } while (0)

    XCHG(top_border_m1+8, src_y-8, xchg);
    XCHG(top_border,      src_y,   xchg);
    XCHG(top_border+8,    src_y+8, 1);
    if (mb_x < mb_width-1)
        XCHG(top_border+32, src_y+16, 1);

    // only copy chroma for normal loop filter
    // or to initialize the top row to 127
    if (!simple || !mb_y) {
        XCHG(top_border_m1+16, src_cb-8, xchg);
        XCHG(top_border_m1+24, src_cr-8, xchg);
        XCHG(top_border+16, src_cb, 1);
        XCHG(top_border+24, src_cr, 1);
    }
}
static av_always_inline
int check_dc_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x)
        return mb_y ? TOP_DC_PRED8x8 : DC_128_PRED8x8;
    else
        return mb_y ? mode : LEFT_DC_PRED8x8;
}

static av_always_inline
int check_tm_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x)
        return mb_y ? VERT_PRED8x8 : DC_129_PRED8x8;
    else
        return mb_y ? mode : HOR_PRED8x8;
}

static av_always_inline
int check_intra_pred8x8_mode(int mode, int mb_x, int mb_y)
{
    if (mode == DC_PRED8x8) {
        return check_dc_pred8x8_mode(mode, mb_x, mb_y);
    } else {
        return mode;
    }
}

static av_always_inline
int check_intra_pred8x8_mode_emuedge(int mode, int mb_x, int mb_y)
{
    switch (mode) {
    case DC_PRED8x8:
        return check_dc_pred8x8_mode(mode, mb_x, mb_y);
    case VERT_PRED8x8:
        return !mb_y ? DC_127_PRED8x8 : mode;
    case HOR_PRED8x8:
        return !mb_x ? DC_129_PRED8x8 : mode;
    case PLANE_PRED8x8 /*TM*/:
        return check_tm_pred8x8_mode(mode, mb_x, mb_y);
    }
    return mode;
}

static av_always_inline
int check_tm_pred4x4_mode(int mode, int mb_x, int mb_y)
{
    if (!mb_x)
        return mb_y ? VERT_VP8_PRED : DC_129_PRED;
    else
        return mb_y ? mode : HOR_VP8_PRED;
}

static av_always_inline
int check_intra_pred4x4_mode_emuedge(int mode, int mb_x, int mb_y, int *copy_buf)
{
    switch (mode) {
    case VERT_PRED:
        if (!mb_x && mb_y) {
            *copy_buf = 1;
            return mode;
        }
        /* fall-through */
    case DIAG_DOWN_LEFT_PRED:
    case VERT_LEFT_PRED:
        return !mb_y ? DC_127_PRED : mode;
    case HOR_PRED:
        if (!mb_y) {
            *copy_buf = 1;
            return mode;
        }
        /* fall-through */
    case HOR_UP_PRED:
        return !mb_x ? DC_129_PRED : mode;
    case TM_VP8_PRED:
        return check_tm_pred4x4_mode(mode, mb_x, mb_y);
    case DC_PRED: // 4x4 DC doesn't use the same "H.264-style" exceptions as 16x16/8x8 DC
    case DIAG_DOWN_RIGHT_PRED:
    case VERT_RIGHT_PRED:
    case HOR_DOWN_PRED:
        if (!mb_y || !mb_x)
            *copy_buf = 1;
        return mode;
    }
    return mode;
}
static av_always_inline
void intra_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
                   VP8Macroblock *mb, int mb_x, int mb_y)
{
    AVCodecContext *avctx = s->avctx;
    int x, y, mode, nnz;
    uint32_t tr;

    // for the first row, we need to run xchg_mb_border to init the top edge to 127
    // otherwise, skip it if we aren't going to deblock
    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 1);

    if (mb->mode < MODE_I4x4) {
        if (avctx->flags & CODEC_FLAG_EMU_EDGE) { // tested
            mode = check_intra_pred8x8_mode_emuedge(mb->mode, mb_x, mb_y);
        } else {
            mode = check_intra_pred8x8_mode(mb->mode, mb_x, mb_y);
        }
        s->hpc.pred16x16[mode](dst[0], s->linesize);
    } else {
        uint8_t *ptr = dst[0];
        uint8_t *intra4x4 = mb->intra4x4_pred_mode_mb;
        uint8_t tr_top[4] = { 127, 127, 127, 127 };

        // all blocks on the right edge of the macroblock use the bottom edge
        // of the top macroblock for their topright edge
        uint8_t *tr_right = ptr - s->linesize + 16;

        // if we're on the right edge of the frame, said edge is extended
        // from the top macroblock
        if (!(!mb_y && avctx->flags & CODEC_FLAG_EMU_EDGE) &&
            mb_x == s->mb_width-1) {
            tr = tr_right[-1]*0x01010101u;
            tr_right = (uint8_t *)&tr;
        }

        if (mb->skip)
            AV_ZERO128(td->non_zero_count_cache);

        for (y = 0; y < 4; y++) {
            uint8_t *topright = ptr + 4 - s->linesize;
            for (x = 0; x < 4; x++) {
                int copy = 0, linesize = s->linesize;
                uint8_t *dst = ptr+4*x;
                DECLARE_ALIGNED(4, uint8_t, copy_dst)[5*8];

                if ((y == 0 || x == 3) && mb_y == 0 && avctx->flags & CODEC_FLAG_EMU_EDGE) {
                    topright = tr_top;
                } else if (x == 3)
                    topright = tr_right;

                if (avctx->flags & CODEC_FLAG_EMU_EDGE) { // mb_x+x or mb_y+y is a hack but works
                    mode = check_intra_pred4x4_mode_emuedge(intra4x4[x], mb_x + x, mb_y + y, &copy);
                    if (copy) {
                        dst = copy_dst + 12;
                        linesize = 8;
                        if (!(mb_y + y)) {
                            copy_dst[3] = 127U;
                            AV_WN32A(copy_dst+4, 127U * 0x01010101U);
                        } else {
                            AV_COPY32(copy_dst+4, ptr+4*x-s->linesize);
                            if (!(mb_x + x)) {
                                copy_dst[3] = 129U;
                            } else {
                                copy_dst[3] = ptr[4*x-s->linesize-1];
                            }
                        }
                        if (!(mb_x + x)) {
                            copy_dst[11] =
                            copy_dst[19] =
                            copy_dst[27] =
                            copy_dst[35] = 129U;
                        } else {
                            copy_dst[11] = ptr[4*x              -1];
                            copy_dst[19] = ptr[4*x+s->linesize  -1];
                            copy_dst[27] = ptr[4*x+s->linesize*2-1];
                            copy_dst[35] = ptr[4*x+s->linesize*3-1];
                        }
                    }
                } else {
                    mode = intra4x4[x];
                }
                s->hpc.pred4x4[mode](dst, topright, linesize);
                if (copy) {
                    AV_COPY32(ptr+4*x              , copy_dst+12);
                    AV_COPY32(ptr+4*x+s->linesize  , copy_dst+20);
                    AV_COPY32(ptr+4*x+s->linesize*2, copy_dst+28);
                    AV_COPY32(ptr+4*x+s->linesize*3, copy_dst+36);
                }

                nnz = td->non_zero_count_cache[y][x];
                if (nnz) {
                    if (nnz == 1)
                        s->vp8dsp.vp8_idct_dc_add(ptr+4*x, td->block[y][x], s->linesize);
                    else
                        s->vp8dsp.vp8_idct_add(ptr+4*x, td->block[y][x], s->linesize);
                }
                topright += 4;
            }

            ptr      += 4*s->linesize;
            intra4x4 += 4;
        }
    }

    if (avctx->flags & CODEC_FLAG_EMU_EDGE) {
        mode = check_intra_pred8x8_mode_emuedge(mb->chroma_pred_mode, mb_x, mb_y);
    } else {
        mode = check_intra_pred8x8_mode(mb->chroma_pred_mode, mb_x, mb_y);
    }
    s->hpc.pred8x8[mode](dst[1], s->uvlinesize);
    s->hpc.pred8x8[mode](dst[2], s->uvlinesize);

    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE && !mb_y) && (s->deblock_filter || !mb_y) && td->thread_nr == 0)
        xchg_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2],
                       s->linesize, s->uvlinesize, mb_x, mb_y, s->mb_width,
                       s->filter.simple, 0);
}
static const uint8_t subpel_idx[3][8] = {
    { 0, 1, 2, 1, 2, 1, 2, 1 }, // nr. of left extra pixels,
                                // also function pointer index
    { 0, 3, 5, 3, 5, 3, 5, 3 }, // nr. of extra pixels required
    { 0, 2, 3, 2, 3, 2, 3, 2 }, // nr. of right extra pixels
};
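/* Reading of the table above (an interpretation, not normative): even
 * nonzero subpel positions select the 6-tap filter (2 extra pixels left,
 * 3 right, 5 total per row), odd positions the 4-tap filter (1 left,
 * 2 right, 3 total), and position 0 is a plain copy needing none. */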
/**
 * luma MC function
 *
 * @param s VP8 decoding context
 * @param dst target buffer for block data at block position
 * @param ref reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block (16, 8 or 4)
 * @param block_h height of block (always same as block_w)
 * @param width width of src/dst plane data
 * @param height height of src/dst plane data
 * @param linesize size of a single line of plane data, including padding
 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
 */
static av_always_inline
void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
                 ThreadFrame *ref, const VP56mv *mv,
                 int x_off, int y_off, int block_w, int block_h,
                 int width, int height, int linesize,
                 vp8_mc_func mc_func[3][3])
{
    uint8_t *src = ref->f->data[0];

    if (AV_RN32A(mv)) {
        int mx = (mv->x << 1)&7, mx_idx = subpel_idx[0][mx];
        int my = (mv->y << 1)&7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 2;
        y_off += mv->y >> 2;

        // edge emulation
        ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 4, 0);
        src += y_off * linesize + x_off;
        if (x_off < mx_idx || x_off >= width  - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            s->vdsp.emulated_edge_mc(td->edge_emu_buffer, src - my_idx * linesize - mx_idx, linesize,
                                     block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx, width, height);
            src = td->edge_emu_buffer + mx_idx + linesize * my_idx;
        }
        mc_func[my_idx][mx_idx](dst, linesize, src, linesize, block_h, mx, my);
    } else {
        ff_thread_await_progress(ref, (3 + y_off + block_h) >> 4, 0);
        mc_func[0][0](dst, linesize, src + y_off * linesize + x_off, linesize, block_h, 0, 0);
    }
}
/**
 * chroma MC function
 *
 * @param s VP8 decoding context
 * @param dst1 target buffer for block data at block position (U plane)
 * @param dst2 target buffer for block data at block position (V plane)
 * @param ref reference picture buffer at origin (0, 0)
 * @param mv motion vector (relative to block position) to get pixel data from
 * @param x_off horizontal position of block from origin (0, 0)
 * @param y_off vertical position of block from origin (0, 0)
 * @param block_w width of block (16, 8 or 4)
 * @param block_h height of block (always same as block_w)
 * @param width width of src/dst plane data
 * @param height height of src/dst plane data
 * @param linesize size of a single line of plane data, including padding
 * @param mc_func motion compensation function pointers (bilinear or sixtap MC)
 */
static av_always_inline
void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2,
                   ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off,
                   int block_w, int block_h, int width, int height, int linesize,
                   vp8_mc_func mc_func[3][3])
{
    uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];

    if (AV_RN32A(mv)) {
        int mx = mv->x&7, mx_idx = subpel_idx[0][mx];
        int my = mv->y&7, my_idx = subpel_idx[0][my];

        x_off += mv->x >> 3;
        y_off += mv->y >> 3;

        // edge emulation
        src1 += y_off * linesize + x_off;
        src2 += y_off * linesize + x_off;
        ff_thread_await_progress(ref, (3 + y_off + block_h + subpel_idx[2][my]) >> 3, 0);
        if (x_off < mx_idx || x_off >= width  - block_w - subpel_idx[2][mx] ||
            y_off < my_idx || y_off >= height - block_h - subpel_idx[2][my]) {
            s->vdsp.emulated_edge_mc(td->edge_emu_buffer, src1 - my_idx * linesize - mx_idx, linesize,
                                     block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx, width, height);
            src1 = td->edge_emu_buffer + mx_idx + linesize * my_idx;
            mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);

            s->vdsp.emulated_edge_mc(td->edge_emu_buffer, src2 - my_idx * linesize - mx_idx, linesize,
                                     block_w + subpel_idx[1][mx], block_h + subpel_idx[1][my],
                                     x_off - mx_idx, y_off - my_idx, width, height);
            src2 = td->edge_emu_buffer + mx_idx + linesize * my_idx;
            mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
        } else {
            mc_func[my_idx][mx_idx](dst1, linesize, src1, linesize, block_h, mx, my);
            mc_func[my_idx][mx_idx](dst2, linesize, src2, linesize, block_h, mx, my);
        }
    } else {
        ff_thread_await_progress(ref, (3 + y_off + block_h) >> 3, 0);
        mc_func[0][0](dst1, linesize, src1 + y_off * linesize + x_off, linesize, block_h, 0, 0);
        mc_func[0][0](dst2, linesize, src2 + y_off * linesize + x_off, linesize, block_h, 0, 0);
    }
}
static av_always_inline
void vp8_mc_part(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
                 ThreadFrame *ref_frame, int x_off, int y_off,
                 int bx_off, int by_off,
                 int block_w, int block_h,
                 int width, int height, VP56mv *mv)
{
    VP56mv uvmv = *mv;

    /* Y */
    vp8_mc_luma(s, td, dst[0] + by_off * s->linesize + bx_off,
                ref_frame, mv, x_off + bx_off, y_off + by_off,
                block_w, block_h, width, height, s->linesize,
                s->put_pixels_tab[block_w == 8]);

    /* U/V */
    if (s->profile == 3) {
        uvmv.x &= ~7;
        uvmv.y &= ~7;
    }
    x_off   >>= 1; y_off   >>= 1;
    bx_off  >>= 1; by_off  >>= 1;
    width   >>= 1; height  >>= 1;
    block_w >>= 1; block_h >>= 1;
    vp8_mc_chroma(s, td, dst[1] + by_off * s->uvlinesize + bx_off,
                  dst[2] + by_off * s->uvlinesize + bx_off, ref_frame,
                  &uvmv, x_off + bx_off, y_off + by_off,
                  block_w, block_h, width, height, s->uvlinesize,
                  s->put_pixels_tab[1 + (block_w == 4)]);
}
/* Fetch pixels for estimated mv 4 macroblocks ahead.
 * Optimized for 64-byte cache lines.  Inspired by ffh264 prefetch_motion. */
static av_always_inline void prefetch_motion(VP8Context *s, VP8Macroblock *mb, int mb_x, int mb_y, int mb_xy, int ref)
{
    /* Don't prefetch refs that haven't been used very often this frame. */
    if (s->ref_count[ref-1] > (mb_xy >> 5)) {
        int x_off = mb_x << 4, y_off = mb_y << 4;
        int mx = (mb->mv.x>>2) + x_off + 8;
        int my = (mb->mv.y>>2) + y_off;
        uint8_t **src= s->framep[ref]->tf.f->data;
        int off= mx + (my + (mb_x&3)*4)*s->linesize + 64;
        /* For threading, a ff_thread_await_progress here might be useful, but
         * it actually slows down the decoder. Since a bad prefetch doesn't
         * generate bad decoder output, we don't run it here. */
        s->vdsp.prefetch(src[0]+off, s->linesize, 4);
        off= (mx>>1) + ((my>>1) + (mb_x&7))*s->uvlinesize + 64;
        s->vdsp.prefetch(src[1]+off, src[2]-src[1], 2);
    }
}
/**
 * Apply motion vectors to prediction buffer, chapter 18.
 */
static av_always_inline
void inter_predict(VP8Context *s, VP8ThreadData *td, uint8_t *dst[3],
                   VP8Macroblock *mb, int mb_x, int mb_y)
{
    int x_off = mb_x << 4, y_off = mb_y << 4;
    int width = 16*s->mb_width, height = 16*s->mb_height;
    ThreadFrame *ref = &s->framep[mb->ref_frame]->tf;
    VP56mv *bmv = mb->bmv;

    switch (mb->partitioning) {
    case VP8_SPLITMVMODE_NONE:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 16, 16, width, height, &mb->mv);
        break;
    case VP8_SPLITMVMODE_4x4: {
        int x, y;
        VP56mv uvmv;

        /* Y */
        for (y = 0; y < 4; y++) {
            for (x = 0; x < 4; x++) {
                vp8_mc_luma(s, td, dst[0] + 4*y*s->linesize + x*4,
                            ref, &bmv[4*y + x],
                            4*x + x_off, 4*y + y_off, 4, 4,
                            width, height, s->linesize,
                            s->put_pixels_tab[2]);
            }
        }

        /* U/V */
        x_off >>= 1; y_off >>= 1; width >>= 1; height >>= 1;
        for (y = 0; y < 2; y++) {
            for (x = 0; x < 2; x++) {
                uvmv.x = mb->bmv[ 2*y    * 4 + 2*x  ].x +
                         mb->bmv[ 2*y    * 4 + 2*x+1].x +
                         mb->bmv[(2*y+1) * 4 + 2*x  ].x +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].x;
                uvmv.y = mb->bmv[ 2*y    * 4 + 2*x  ].y +
                         mb->bmv[ 2*y    * 4 + 2*x+1].y +
                         mb->bmv[(2*y+1) * 4 + 2*x  ].y +
                         mb->bmv[(2*y+1) * 4 + 2*x+1].y;
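                /* Average the four luma MVs into one chroma MV, rounding to
                 * nearest (half away from zero): the sign-bit term makes e.g.
                 * a sum of 6 give 2 and a sum of -6 give -2 after the >> 2. */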
                uvmv.x = (uvmv.x + 2 + (uvmv.x >> (INT_BIT-1))) >> 2;
                uvmv.y = (uvmv.y + 2 + (uvmv.y >> (INT_BIT-1))) >> 2;
                if (s->profile == 3) {
                    uvmv.x &= ~7;
                    uvmv.y &= ~7;
                }
                vp8_mc_chroma(s, td, dst[1] + 4*y*s->uvlinesize + x*4,
                              dst[2] + 4*y*s->uvlinesize + x*4, ref, &uvmv,
                              4*x + x_off, 4*y + y_off, 4, 4,
                              width, height, s->uvlinesize,
                              s->put_pixels_tab[2]);
            }
        }
        break;
    }
    case VP8_SPLITMVMODE_16x8:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 16, 8, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 8, 16, 8, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x16:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 8, 16, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 0, 8, 16, width, height, &bmv[1]);
        break;
    case VP8_SPLITMVMODE_8x8:
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 0, 8, 8, width, height, &bmv[0]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 0, 8, 8, width, height, &bmv[1]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    0, 8, 8, 8, width, height, &bmv[2]);
        vp8_mc_part(s, td, dst, ref, x_off, y_off,
                    8, 8, 8, 8, width, height, &bmv[3]);
        break;
    }
}
static av_always_inline void idct_mb(VP8Context *s, VP8ThreadData *td,
                                     uint8_t *dst[3], VP8Macroblock *mb)
{
    int x, y, ch;

    if (mb->mode != MODE_I4x4) {
        uint8_t *y_dst = dst[0];
        for (y = 0; y < 4; y++) {
            uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[y]);
            if (nnz4) {
                if (nnz4&~0x01010101) {
                    for (x = 0; x < 4; x++) {
                        if ((uint8_t)nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(y_dst+4*x, td->block[y][x], s->linesize);
                        else if((uint8_t)nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(y_dst+4*x, td->block[y][x], s->linesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            break;
                    }
                } else {
                    s->vp8dsp.vp8_idct_dc_add4y(y_dst, td->block[y], s->linesize);
                }
            }
            y_dst += 4*s->linesize;
        }
    }

    for (ch = 0; ch < 2; ch++) {
        uint32_t nnz4 = AV_RL32(td->non_zero_count_cache[4+ch]);
        if (nnz4) {
            uint8_t *ch_dst = dst[1+ch];
            if (nnz4&~0x01010101) {
                for (y = 0; y < 2; y++) {
                    for (x = 0; x < 2; x++) {
                        if ((uint8_t)nnz4 == 1)
                            s->vp8dsp.vp8_idct_dc_add(ch_dst+4*x, td->block[4+ch][(y<<1)+x], s->uvlinesize);
                        else if((uint8_t)nnz4 > 1)
                            s->vp8dsp.vp8_idct_add(ch_dst+4*x, td->block[4+ch][(y<<1)+x], s->uvlinesize);
                        nnz4 >>= 8;
                        if (!nnz4)
                            goto chroma_idct_end;
                    }
                    ch_dst += 4*s->uvlinesize;
                }
            } else {
                s->vp8dsp.vp8_idct_dc_add4uv(ch_dst, td->block[4+ch], s->uvlinesize);
            }
        }
chroma_idct_end: ;
    }
}
static av_always_inline void filter_level_for_mb(VP8Context *s, VP8Macroblock *mb, VP8FilterStrength *f)
{
    int interior_limit, filter_level;

    if (s->segmentation.enabled) {
        filter_level = s->segmentation.filter_level[mb->segment];
        if (!s->segmentation.absolute_vals)
            filter_level += s->filter.level;
    } else
        filter_level = s->filter.level;

    if (s->lf_delta.enabled) {
        filter_level += s->lf_delta.ref[mb->ref_frame];
        filter_level += s->lf_delta.mode[mb->mode];
    }

    filter_level = av_clip_uintp2(filter_level, 6);

    interior_limit = filter_level;
    if (s->filter.sharpness) {
        interior_limit >>= (s->filter.sharpness + 3) >> 2;
        interior_limit = FFMIN(interior_limit, 9 - s->filter.sharpness);
    }
    interior_limit = FFMAX(interior_limit, 1);

    f->filter_level = filter_level;
    f->inner_limit  = interior_limit;
    f->inner_filter = !mb->skip || mb->mode == MODE_I4x4 || mb->mode == VP8_MVMODE_SPLIT;
}
static av_always_inline void filter_mb(VP8Context *s, uint8_t *dst[3], VP8FilterStrength *f, int mb_x, int mb_y)
{
    int mbedge_lim, bedge_lim, hev_thresh;
    int filter_level = f->filter_level;
    int inner_limit  = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize     = s->linesize;
    int uvlinesize   = s->uvlinesize;
    static const uint8_t hev_thresh_lut[2][64] = {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
          3, 3, 3, 3 },
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1,
          1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
          2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
          2, 2, 2, 2 }
    };

    if (!filter_level)
        return;

    bedge_lim  = 2*filter_level + inner_limit;
    mbedge_lim = bedge_lim + 4;

    hev_thresh = hev_thresh_lut[s->keyframe][filter_level];

    if (mb_x) {
        s->vp8dsp.vp8_h_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 4, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+ 8, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter16y_inner(dst[0]+12, linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_h_loop_filter8uv_inner(dst[1] + 4, dst[2] + 4,
                                             uvlinesize, bedge_lim,
                                             inner_limit, hev_thresh);
    }

    if (mb_y) {
        s->vp8dsp.vp8_v_loop_filter16y(dst[0], linesize,
                                       mbedge_lim, inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv(dst[1], dst[2], uvlinesize,
                                       mbedge_lim, inner_limit, hev_thresh);
    }

    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 4*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+ 8*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter16y_inner(dst[0]+12*linesize,
                                             linesize, bedge_lim,
                                             inner_limit, hev_thresh);
        s->vp8dsp.vp8_v_loop_filter8uv_inner(dst[1] + 4 * uvlinesize,
                                             dst[2] + 4 * uvlinesize,
                                             uvlinesize, bedge_lim,
                                             inner_limit, hev_thresh);
    }
}
static av_always_inline void filter_mb_simple(VP8Context *s, uint8_t *dst, VP8FilterStrength *f, int mb_x, int mb_y)
{
    int mbedge_lim, bedge_lim;
    int filter_level = f->filter_level;
    int inner_limit  = f->inner_limit;
    int inner_filter = f->inner_filter;
    int linesize     = s->linesize;

    if (!filter_level)
        return;

    bedge_lim  = 2*filter_level + inner_limit;
    mbedge_lim = bedge_lim + 4;

    if (mb_x)
        s->vp8dsp.vp8_h_loop_filter_simple(dst, linesize, mbedge_lim);
    if (inner_filter) {
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 4, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+ 8, linesize, bedge_lim);
        s->vp8dsp.vp8_h_loop_filter_simple(dst+12, linesize, bedge_lim);
    }
    if (mb_y)
        s->vp8dsp.vp8_v_loop_filter_simple(dst, linesize, mbedge_lim);
    if (inner_filter) {
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 4*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+ 8*linesize, linesize, bedge_lim);
        s->vp8dsp.vp8_v_loop_filter_simple(dst+12*linesize, linesize, bedge_lim);
    }
}
#define MARGIN (16 << 2)
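/* MVs are clamped to MARGIN beyond the frame edge; the <<2 expresses the
 * 16-pixel margin in the same quarter-pel units as mv_min/mv_max below
 * (an interpretation of the code, not spec wording). */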
static void vp8_decode_mv_mb_modes(AVCodecContext *avctx, VP8Frame *curframe,
                                   VP8Frame *prev_frame)
{
    VP8Context *s = avctx->priv_data;
    int mb_x, mb_y;

    s->mv_min.y = -MARGIN;
    s->mv_max.y = ((s->mb_height - 1) << 6) + MARGIN;
    for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
        VP8Macroblock *mb = s->macroblocks_base + ((s->mb_width+1)*(mb_y + 1) + 1);
        int mb_xy = mb_y*s->mb_width;

        AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED*0x01010101);

        s->mv_min.x = -MARGIN;
        s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;
        for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
            if (mb_y == 0)
                AV_WN32A((mb-s->mb_width-1)->intra4x4_pred_mode_top, DC_PRED*0x01010101);
            decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
                           prev_frame && prev_frame->seg_map ?
                           prev_frame->seg_map->data + mb_xy : NULL, 1);
            s->mv_min.x -= 64;
            s->mv_max.x -= 64;
        }
        s->mv_min.y -= 64;
        s->mv_max.y -= 64;
    }
}
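/* Sliced-threading synchronisation, as implemented by the macros below:
 * each worker publishes its progress as (mb_y << 16 | mb_x) in
 * thread_mb_pos; check_thread_pos blocks on the other thread's condition
 * variable until that thread has passed the requested position, and
 * update_pos wakes any thread currently waiting on ours. */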
#if HAVE_THREADS
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)\
    do {\
        int tmp = (mb_y_check << 16) | (mb_x_check & 0xFFFF);\
        if (otd->thread_mb_pos < tmp) {\
            pthread_mutex_lock(&otd->lock);\
            td->wait_mb_pos = tmp;\
            do {\
                if (otd->thread_mb_pos >= tmp)\
                    break;\
                pthread_cond_wait(&otd->cond, &otd->lock);\
            } while (1);\
            td->wait_mb_pos = INT_MAX;\
            pthread_mutex_unlock(&otd->lock);\
        }\
    } while (0);

#define update_pos(td, mb_y, mb_x)\
    do {\
        int pos              = (mb_y << 16) | (mb_x & 0xFFFF);\
        int sliced_threading = (avctx->active_thread_type == FF_THREAD_SLICE) && (num_jobs > 1);\
        int is_null          = (next_td == NULL) || (prev_td == NULL);\
        int pos_check        = (is_null) ? 1 :\
                               (next_td != td && pos >= next_td->wait_mb_pos) ||\
                               (prev_td != td && pos >= prev_td->wait_mb_pos);\
        td->thread_mb_pos = pos;\
        if (sliced_threading && pos_check) {\
            pthread_mutex_lock(&td->lock);\
            pthread_cond_broadcast(&td->cond);\
            pthread_mutex_unlock(&td->lock);\
        }\
    } while (0);
#else
#define check_thread_pos(td, otd, mb_x_check, mb_y_check)
#define update_pos(td, mb_y, mb_x)
#endif
static void vp8_decode_mb_row_no_filter(AVCodecContext *avctx, void *tdata,
                                        int jobnr, int threadnr)
{
    VP8Context *s = avctx->priv_data;
    VP8ThreadData *prev_td, *next_td, *td = &s->thread_data[threadnr];
    int mb_y = td->thread_mb_pos>>16;
    int i, y, mb_x, mb_xy = mb_y*s->mb_width;
    int num_jobs = s->num_jobs;
    VP8Frame *curframe = s->curframe, *prev_frame = s->prev_frame;
    VP56RangeCoder *c = &s->coeff_partition[mb_y & (s->num_coeff_partitions-1)];
    VP8Macroblock *mb;
    uint8_t *dst[3] = {
        curframe->tf.f->data[0] + 16*mb_y*s->linesize,
        curframe->tf.f->data[1] +  8*mb_y*s->uvlinesize,
        curframe->tf.f->data[2] +  8*mb_y*s->uvlinesize
    };
    if (mb_y == 0) prev_td = td;
    else           prev_td = &s->thread_data[(jobnr + num_jobs - 1)%num_jobs];
    if (mb_y == s->mb_height-1) next_td = td;
    else                        next_td = &s->thread_data[(jobnr + 1)%num_jobs];
    if (s->mb_layout == 1)
        mb = s->macroblocks_base + ((s->mb_width+1)*(mb_y + 1) + 1);
    else {
        mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;
        memset(mb - 1, 0, sizeof(*mb)); // zero left macroblock
        AV_WN32A(s->intra4x4_pred_mode_left, DC_PRED*0x01010101);
    }

    memset(td->left_nnz, 0, sizeof(td->left_nnz));
    // left edge of 129 for intra prediction
    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        for (i = 0; i < 3; i++)
            for (y = 0; y < 16>>!!i; y++)
                dst[i][y*curframe->tf.f->linesize[i]-1] = 129;
        if (mb_y == 1)
            s->top_border[0][15] = s->top_border[0][23] = s->top_border[0][31] = 129;
    }

    s->mv_min.x = -MARGIN;
    s->mv_max.x = ((s->mb_width - 1) << 6) + MARGIN;

    for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb_xy++, mb++) {
        // Wait for previous thread to read mb_x+2, and reach mb_y-1.
        if (prev_td != td) {
            if (threadnr != 0) {
                check_thread_pos(td, prev_td, mb_x+1, mb_y-1);
            } else {
                check_thread_pos(td, prev_td, (s->mb_width+3) + (mb_x+1), mb_y-1);
            }
        }

        s->vdsp.prefetch(dst[0] + (mb_x&3)*4*s->linesize + 64, s->linesize, 4);
        s->vdsp.prefetch(dst[1] + (mb_x&7)*s->uvlinesize + 64, dst[2] - dst[1], 2);

        if (!s->mb_layout)
            decode_mb_mode(s, mb, mb_x, mb_y, curframe->seg_map->data + mb_xy,
                           prev_frame && prev_frame->seg_map ?
                           prev_frame->seg_map->data + mb_xy : NULL, 0);

        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_PREVIOUS);

        if (!mb->skip)
            decode_mb_coeffs(s, td, c, mb, s->top_nnz[mb_x], td->left_nnz);

        if (mb->mode <= MODE_I4x4)
            intra_predict(s, td, dst, mb, mb_x, mb_y);
        else
            inter_predict(s, td, dst, mb, mb_x, mb_y);

        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN);

        if (!mb->skip) {
            idct_mb(s, td, dst, mb);
        } else {
            AV_ZERO64(td->left_nnz);
            AV_WN64(s->top_nnz[mb_x], 0);   // array of 9, so unaligned

            // Reset DC block predictors if they would exist if the mb had coefficients
            if (mb->mode != MODE_I4x4 && mb->mode != VP8_MVMODE_SPLIT) {
                td->left_nnz[8]     = 0;
                s->top_nnz[mb_x][8] = 0;
            }
        }

        if (s->deblock_filter)
            filter_level_for_mb(s, mb, &td->filter_strength[mb_x]);

        if (s->deblock_filter && num_jobs != 1 && threadnr == num_jobs-1) {
            if (s->filter.simple)
                backup_mb_border(s->top_border[mb_x+1], dst[0], NULL, NULL, s->linesize, 0, 1);
            else
                backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
        }

        prefetch_motion(s, mb, mb_x, mb_y, mb_xy, VP56_FRAME_GOLDEN2);

        dst[0] += 16;
        dst[1] += 8;
        dst[2] += 8;
        s->mv_min.x -= 64;
        s->mv_max.x -= 64;

        if (mb_x == s->mb_width+1) {
            update_pos(td, mb_y, s->mb_width+3);
        } else {
            update_pos(td, mb_y, mb_x);
        }
    }
}
static void vp8_filter_mb_row(AVCodecContext *avctx, void *tdata,
                              int jobnr, int threadnr)
{
    VP8Context *s = avctx->priv_data;
    VP8ThreadData *td = &s->thread_data[threadnr];
    int mb_x, mb_y = td->thread_mb_pos>>16, num_jobs = s->num_jobs;
    AVFrame *curframe = s->curframe->tf.f;
    VP8Macroblock *mb;
    VP8ThreadData *prev_td, *next_td;
    uint8_t *dst[3] = {
        curframe->data[0] + 16*mb_y*s->linesize,
        curframe->data[1] +  8*mb_y*s->uvlinesize,
        curframe->data[2] +  8*mb_y*s->uvlinesize
    };

    if (s->mb_layout == 1)
        mb = s->macroblocks_base + ((s->mb_width+1)*(mb_y + 1) + 1);
    else
        mb = s->macroblocks + (s->mb_height - mb_y - 1)*2;

    if (mb_y == 0) prev_td = td;
    else           prev_td = &s->thread_data[(jobnr + num_jobs - 1)%num_jobs];
    if (mb_y == s->mb_height-1) next_td = td;
    else                        next_td = &s->thread_data[(jobnr + 1)%num_jobs];

    for (mb_x = 0; mb_x < s->mb_width; mb_x++, mb++) {
        VP8FilterStrength *f = &td->filter_strength[mb_x];
        if (prev_td != td) {
            check_thread_pos(td, prev_td, (mb_x+1) + (s->mb_width+3), mb_y-1);
        }
        if (next_td != td)
            if (next_td != &s->thread_data[0]) {
                check_thread_pos(td, next_td, mb_x+1, mb_y+1);
            }

        if (num_jobs == 1) {
            if (s->filter.simple)
                backup_mb_border(s->top_border[mb_x+1], dst[0], NULL, NULL, s->linesize, 0, 1);
            else
                backup_mb_border(s->top_border[mb_x+1], dst[0], dst[1], dst[2], s->linesize, s->uvlinesize, 0);
        }

        if (s->filter.simple)
            filter_mb_simple(s, dst[0], f, mb_x, mb_y);
        else
            filter_mb(s, dst, f, mb_x, mb_y);

        dst[0] += 16;
        dst[1] += 8;
        dst[2] += 8;

        update_pos(td, mb_y, (s->mb_width+3) + mb_x);
    }
}
static int vp8_decode_mb_row_sliced(AVCodecContext *avctx, void *tdata,
                                    int jobnr, int threadnr)
{
    VP8Context *s = avctx->priv_data;
    VP8ThreadData *td = &s->thread_data[jobnr];
    VP8ThreadData *next_td = NULL, *prev_td = NULL;
    VP8Frame *curframe = s->curframe;
    int mb_y, num_jobs = s->num_jobs;
    td->thread_nr = threadnr;
    for (mb_y = jobnr; mb_y < s->mb_height; mb_y += num_jobs) {
        if (mb_y >= s->mb_height) break;
        td->thread_mb_pos = mb_y<<16;
        vp8_decode_mb_row_no_filter(avctx, tdata, jobnr, threadnr);
        if (s->deblock_filter)
            vp8_filter_mb_row(avctx, tdata, jobnr, threadnr);
        update_pos(td, mb_y, INT_MAX & 0xFFFF);

        s->mv_min.y -= 64;
        s->mv_max.y -= 64;

        if (avctx->active_thread_type == FF_THREAD_FRAME)
            ff_thread_report_progress(&curframe->tf, mb_y, 0);
    }

    return 0;
}
static int vp8_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                            AVPacket *avpkt)
{
    VP8Context *s = avctx->priv_data;
    int ret, i, referenced, num_jobs;
    enum AVDiscard skip_thresh;
    VP8Frame *av_uninit(curframe), *prev_frame;

    if ((ret = decode_frame_header(s, avpkt->data, avpkt->size)) < 0)
        goto err;

    prev_frame = s->framep[VP56_FRAME_CURRENT];

    referenced = s->update_last || s->update_golden == VP56_FRAME_CURRENT
                                || s->update_altref == VP56_FRAME_CURRENT;

    skip_thresh = !referenced ? AVDISCARD_NONREF :
                  !s->keyframe ? AVDISCARD_NONKEY : AVDISCARD_ALL;

    if (avctx->skip_frame >= skip_thresh) {
        s->invisible = 1;
        memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
        goto skip_decode;
    }
    s->deblock_filter = s->filter.level && avctx->skip_loop_filter < skip_thresh;

    // release no longer referenced frames
    for (i = 0; i < 5; i++)
        if (s->frames[i].tf.f->data[0] &&
            &s->frames[i] != prev_frame &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2])
            vp8_release_frame(s, &s->frames[i]);

    // find a free buffer
    for (i = 0; i < 5; i++)
        if (&s->frames[i] != prev_frame &&
            &s->frames[i] != s->framep[VP56_FRAME_PREVIOUS] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN] &&
            &s->frames[i] != s->framep[VP56_FRAME_GOLDEN2]) {
            curframe = s->framep[VP56_FRAME_CURRENT] = &s->frames[i];
            break;
        }
    if (i == 5) {
        av_log(avctx, AV_LOG_FATAL, "Ran out of free frames!\n");
        abort();
    }
    if (curframe->tf.f->data[0])
        vp8_release_frame(s, curframe);

    // Given that arithmetic probabilities are updated every frame, it's quite likely
    // that the values we have on a random interframe are complete junk if we didn't
    // start decode on a keyframe. So just don't display anything rather than junk.
    if (!s->keyframe && (!s->framep[VP56_FRAME_PREVIOUS] ||
                         !s->framep[VP56_FRAME_GOLDEN] ||
                         !s->framep[VP56_FRAME_GOLDEN2])) {
        av_log(avctx, AV_LOG_WARNING, "Discarding interframe without a prior keyframe!\n");
        ret = AVERROR_INVALIDDATA;
        goto err;
    }

    curframe->tf.f->key_frame = s->keyframe;
    curframe->tf.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    if ((ret = vp8_alloc_frame(s, curframe, referenced)) < 0)
        goto err;

    // check if golden and altref are swapped
    if (s->update_altref != VP56_FRAME_NONE) {
        s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[s->update_altref];
    } else {
        s->next_framep[VP56_FRAME_GOLDEN2] = s->framep[VP56_FRAME_GOLDEN2];
    }
    if (s->update_golden != VP56_FRAME_NONE) {
        s->next_framep[VP56_FRAME_GOLDEN] = s->framep[s->update_golden];
    } else {
        s->next_framep[VP56_FRAME_GOLDEN] = s->framep[VP56_FRAME_GOLDEN];
    }
    if (s->update_last) {
        s->next_framep[VP56_FRAME_PREVIOUS] = curframe;
    } else {
        s->next_framep[VP56_FRAME_PREVIOUS] = s->framep[VP56_FRAME_PREVIOUS];
    }
    s->next_framep[VP56_FRAME_CURRENT] = curframe;

    ff_thread_finish_setup(avctx);

    s->linesize   = curframe->tf.f->linesize[0];
    s->uvlinesize = curframe->tf.f->linesize[1];

    if (!s->thread_data[0].edge_emu_buffer)
        for (i = 0; i < MAX_THREADS; i++)
            s->thread_data[i].edge_emu_buffer = av_malloc(21*s->linesize);

    memset(s->top_nnz, 0, s->mb_width*sizeof(*s->top_nnz));
    /* Zero macroblock structures for top/top-left prediction from outside the frame. */
    if (!s->mb_layout)
        memset(s->macroblocks + s->mb_height*2 - 1, 0, (s->mb_width+1)*sizeof(*s->macroblocks));
    if (!s->mb_layout && s->keyframe)
        memset(s->intra4x4_pred_mode_top, DC_PRED, s->mb_width*4);

    // top edge of 127 for intra prediction
    if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        s->top_border[0][15] = s->top_border[0][23] = 127;
        s->top_border[0][31] = 127;
        memset(s->top_border[1], 127, s->mb_width*sizeof(*s->top_border));
    }
    memset(s->ref_count, 0, sizeof(s->ref_count));

    // Make sure the previous frame has read its segmentation map,
    // if we re-use the same map.
    if (prev_frame && s->segmentation.enabled && !s->segmentation.update_map)
        ff_thread_await_progress(&prev_frame->tf, 1, 0);

    if (s->mb_layout == 1)
        vp8_decode_mv_mb_modes(avctx, curframe, prev_frame);

    if (avctx->active_thread_type == FF_THREAD_FRAME)
        num_jobs = 1;
    else
        num_jobs = FFMIN(s->num_coeff_partitions, avctx->thread_count);
    s->num_jobs   = num_jobs;
    s->curframe   = curframe;
    s->prev_frame = prev_frame;
    s->mv_min.y   = -MARGIN;
    s->mv_max.y   = ((s->mb_height - 1) << 6) + MARGIN;
    for (i = 0; i < MAX_THREADS; i++) {
        s->thread_data[i].thread_mb_pos = 0;
        s->thread_data[i].wait_mb_pos = INT_MAX;
    }
    avctx->execute2(avctx, vp8_decode_mb_row_sliced, s->thread_data, NULL, num_jobs);

    ff_thread_report_progress(&curframe->tf, INT_MAX, 0);
    memcpy(&s->framep[0], &s->next_framep[0], sizeof(s->framep[0]) * 4);

skip_decode:
    // if future frames don't use the updated probabilities,
    // reset them to the values we saved
    if (!s->update_probabilities)
        s->prob[0] = s->prob[1];

    if (!s->invisible) {
        if ((ret = av_frame_ref(data, curframe->tf.f)) < 0)
            return ret;
        *got_frame = 1;
    }

    return avpkt->size;
err:
    memcpy(&s->next_framep[0], &s->framep[0], sizeof(s->framep[0]) * 4);
    return ret;
}
static av_cold int vp8_decode_free(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;
    int i;

    vp8_decode_flush_impl(avctx, 1);
    for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++)
        av_frame_free(&s->frames[i].tf.f);

    return 0;
}
static av_cold int vp8_init_frames(VP8Context *s)
{
    int i;
    for (i = 0; i < FF_ARRAY_ELEMS(s->frames); i++) {
        s->frames[i].tf.f = av_frame_alloc();
        if (!s->frames[i].tf.f)
            return AVERROR(ENOMEM);
    }
    return 0;
}
static av_cold int vp8_decode_init(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;
    int ret;

    s->avctx = avctx;
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->internal->allocate_progress = 1;

    ff_videodsp_init(&s->vdsp, 8);
    ff_h264_pred_init(&s->hpc, AV_CODEC_ID_VP8, 8, 1);
    ff_vp8dsp_init(&s->vp8dsp);

    if ((ret = vp8_init_frames(s)) < 0) {
        vp8_decode_free(avctx);
        return ret;
    }

    return 0;
}
static av_cold int vp8_decode_init_thread_copy(AVCodecContext *avctx)
{
    VP8Context *s = avctx->priv_data;
    int ret;

    s->avctx = avctx;

    if ((ret = vp8_init_frames(s)) < 0) {
        vp8_decode_free(avctx);
        return ret;
    }

    return 0;
}
#define REBASE(pic) \
    pic ? pic - &s_src->frames[0] + &s->frames[0] : NULL
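/* REBASE translates a frame pointer from the source context into the same
 * slot of the destination context via pointer arithmetic on the array
 * index, e.g. &s_src->frames[2] maps to &s->frames[2]. */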
static int vp8_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
{
    VP8Context *s = dst->priv_data, *s_src = src->priv_data;
    int i;

    if (s->macroblocks_base &&
        (s_src->mb_width != s->mb_width || s_src->mb_height != s->mb_height)) {
        free_buffers(s);
        s->mb_width  = s_src->mb_width;
        s->mb_height = s_src->mb_height;
    }

    s->prob[0]      = s_src->prob[!s_src->update_probabilities];
    s->segmentation = s_src->segmentation;
    s->lf_delta     = s_src->lf_delta;
    memcpy(s->sign_bias, s_src->sign_bias, sizeof(s->sign_bias));

    for (i = 0; i < FF_ARRAY_ELEMS(s_src->frames); i++) {
        if (s_src->frames[i].tf.f->data[0]) {
            int ret = vp8_ref_frame(s, &s->frames[i], &s_src->frames[i]);
            if (ret < 0)
                return ret;
        }
    }

    s->framep[0] = REBASE(s_src->next_framep[0]);
    s->framep[1] = REBASE(s_src->next_framep[1]);
    s->framep[2] = REBASE(s_src->next_framep[2]);
    s->framep[3] = REBASE(s_src->next_framep[3]);

    return 0;
}
AVCodec ff_vp8_decoder = {
    .name                  = "vp8",
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_VP8,
    .priv_data_size        = sizeof(VP8Context),
    .init                  = vp8_decode_init,
    .close                 = vp8_decode_free,
    .decode                = vp8_decode_frame,
    .capabilities          = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS | CODEC_CAP_SLICE_THREADS,
    .flush                 = vp8_decode_flush,
    .long_name             = NULL_IF_CONFIG_SMALL("On2 VP8"),
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(vp8_decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(vp8_decode_update_thread_context),
};