/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"
#include "videodsp.h"
#include "vp56.h"
#include "vp9.h"
#include "vp9data.h"
#include "vp9dsp.h"
#include "libavutil/avassert.h"

#define VP9_SYNCCODE 0x498342
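// The sync code is the fixed byte sequence 0x49 0x83 0x42 that keyframes and
// intra-only frames embed in the uncompressed header; it is read below as a
// single 24-bit value and compared against this constant.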
struct VP9mvrefPair {
    VP56mv mv[2];
    int8_t ref[2];
};

struct VP9Filter {
    uint8_t level[8 * 8];
    uint8_t /* bit=col */ mask[2 /* 0=y, 1=uv */][2 /* 0=col, 1=row */]
                              [8 /* rows */][4 /* 0=16, 1=8, 2=4, 3=inner4 */];
};
typedef struct VP9Block {
    uint8_t seg_id, intra, comp, ref[2], mode[4], uvmode, skip;
    enum FilterMode filter;
    VP56mv mv[4 /* b_idx */][2 /* ref */];
    enum BlockSize bs;
    enum TxfmMode tx, uvtx;

    int row, row7, col, col7;
    uint8_t *dst[3];
    ptrdiff_t y_stride, uv_stride;
} VP9Block;
typedef struct VP9Context {
    VP9DSPContext dsp;
    VideoDSPContext vdsp;
    GetBitContext gb;
    VP56RangeCoder c;
    VP56RangeCoder *c_b;
    unsigned c_b_size;
    VP9Block b;

    // bitstream header
    uint8_t profile;
    uint8_t keyframe, last_keyframe;
    uint8_t invisible;
    uint8_t use_last_frame_mvs;
    uint8_t errorres;
    uint8_t colorspace;
    uint8_t fullrange;
    uint8_t intraonly;
    uint8_t resetctx;
    uint8_t refreshrefmask;
    uint8_t highprecisionmvs;
    enum FilterMode filtermode;
    uint8_t allowcompinter;
    uint8_t fixcompref;
    uint8_t refreshctx;
    uint8_t parallelmode;
    uint8_t framectxid;
    uint8_t refidx[3];
    uint8_t signbias[3];
    uint8_t varcompref[2];
    AVFrame *refs[8], *f, *fb[10];

    struct {
        uint8_t level;
        int8_t sharpness;
        uint8_t lim_lut[64];
        uint8_t mblim_lut[64];
    } filter;
    struct {
        uint8_t enabled;
        int8_t mode[2];
        int8_t ref[4];
    } lf_delta;
    uint8_t yac_qi;
    int8_t ydc_qdelta, uvdc_qdelta, uvac_qdelta;
    uint8_t lossless;
    struct {
        uint8_t enabled;
        uint8_t temporal;
        uint8_t absolute_vals;
        uint8_t update_map;
        struct {
            uint8_t q_enabled;
            uint8_t lf_enabled;
            uint8_t ref_enabled;
            uint8_t skip_enabled;
            uint8_t ref_val;
            int16_t q_val;
            int8_t lf_val;
            int16_t qmul[2][2];
            uint8_t lflvl[4][2];
        } feat[8];
    } segmentation;
    struct {
        unsigned log2_tile_cols, log2_tile_rows;
        unsigned tile_cols, tile_rows;
        unsigned tile_row_start, tile_row_end, tile_col_start, tile_col_end;
    } tiling;
    unsigned sb_cols, sb_rows, rows, cols;

    struct {
        prob_context p;
        uint8_t coef[4][2][2][6][6][3];
    } prob_ctx[4];
    struct {
        prob_context p;
        uint8_t coef[4][2][2][6][6][11];
        uint8_t seg[7];
        uint8_t segpred[3];
    } prob;
    struct {
        unsigned y_mode[4][10];
        unsigned uv_mode[10][10];
        unsigned filter[4][3];
        unsigned mv_mode[7][4];
        unsigned intra[4][2];
        unsigned comp[5][2];
        unsigned single_ref[5][2][2];
        unsigned comp_ref[5][2];
        unsigned tx32p[2][4];
        unsigned tx16p[2][3];
        unsigned tx8p[2][2];
        unsigned skip[3][2];
        unsigned mv_joint[4];
        struct {
            unsigned sign[2];
            unsigned classes[11];
            unsigned class0[2];
            unsigned bits[10][2];
            unsigned class0_fp[2][4];
            unsigned fp[4];
            unsigned class0_hp[2];
            unsigned hp[2];
        } mv_comp[2];
        unsigned partition[4][4][4];
        unsigned coef[4][2][2][6][6][3];
        unsigned eob[4][2][2][6][6][2];
    } counts;
    enum TxfmMode txfmmode;
    enum CompPredMode comppredmode;

    // contextual (left/above) cache
    uint8_t left_partition_ctx[8], *above_partition_ctx;
    uint8_t left_mode_ctx[16], *above_mode_ctx;
    // FIXME maybe merge some of the below in a flags field?
    uint8_t left_y_nnz_ctx[16], *above_y_nnz_ctx;
    uint8_t left_uv_nnz_ctx[2][8], *above_uv_nnz_ctx[2];
    uint8_t left_skip_ctx[8], *above_skip_ctx; // 1bit
    uint8_t left_txfm_ctx[8], *above_txfm_ctx; // 2bit
    uint8_t left_segpred_ctx[8], *above_segpred_ctx; // 1bit
    uint8_t left_intra_ctx[8], *above_intra_ctx; // 1bit
    uint8_t left_comp_ctx[8], *above_comp_ctx; // 1bit
    uint8_t left_ref_ctx[8], *above_ref_ctx; // 2bit
    uint8_t left_filter_ctx[8], *above_filter_ctx;
    VP56mv left_mv_ctx[16][2], (*above_mv_ctx)[2];

    // whole-frame cache
    uint8_t *intra_pred_data[3];
    uint8_t *segmentation_map;
    struct VP9mvrefPair *mv[2];
    struct VP9Filter *lflvl;
    DECLARE_ALIGNED(32, uint8_t, edge_emu_buffer)[71 * 80];

    // block reconstruction intermediates
    DECLARE_ALIGNED(32, int16_t, block)[4096];
    DECLARE_ALIGNED(32, int16_t, uvblock)[2][1024];
    uint8_t eob[256];
    uint8_t uveob[2][64];
    VP56mv min_mv, max_mv;
    DECLARE_ALIGNED(32, uint8_t, tmp_y)[64 * 64];
    DECLARE_ALIGNED(32, uint8_t, tmp_uv)[2][32 * 32];
} VP9Context;
static const uint8_t bwh_tab[2][N_BS_SIZES][2] = {
    {
        { 16, 16 }, { 16, 8 }, { 8, 16 }, { 8, 8 }, { 8, 4 }, { 4, 8 },
        { 4, 4 }, { 4, 2 }, { 2, 4 }, { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 },
    }, {
        { 8, 8 }, { 8, 4 }, { 4, 8 }, { 4, 4 }, { 4, 2 }, { 2, 4 },
        { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
    }
};
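// The first sub-table gives block width/height in 4px units (used to size the
// per-4px mode/partition context memsets in decode_mode()); the second gives
// them in 8px units, matching the granularity of s->cols/s->rows.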
static int update_size(AVCodecContext *ctx, int w, int h)
{
    VP9Context *s = ctx->priv_data;
    uint8_t *p;

    if (s->above_partition_ctx && w == ctx->width && h == ctx->height)
        return 0;

    ctx->width  = w;
    ctx->height = h;
    s->sb_cols  = (w + 63) >> 6;
    s->sb_rows  = (h + 63) >> 6;
    s->cols     = (w + 7) >> 3;
    s->rows     = (h + 7) >> 3;
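// All per-column context arrays below are carved out of a single av_malloc()
// block: assign() hands out successive sub-ranges of p, each sized as
// (sb_cols * n) elements of the target type. The 240-byte constant in the
// malloc call should equal the sum of the per-sb64-column byte counts
// assigned below (8*5 + 8+8+8+16+16+8+8 + 64+32+32 = 240).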
#define assign(var, type, n) var = (type) p; p += s->sb_cols * n * sizeof(*var)
    av_freep(&s->above_partition_ctx);
    p = av_malloc(s->sb_cols * (240 + sizeof(*s->lflvl) + 16 * sizeof(*s->above_mv_ctx) +
                                64 * s->sb_rows * (1 + sizeof(*s->mv[0]) * 2)));
    if (!p)
        return AVERROR(ENOMEM);
    assign(s->above_partition_ctx, uint8_t *,              8);
    assign(s->above_skip_ctx,      uint8_t *,              8);
    assign(s->above_txfm_ctx,      uint8_t *,              8);
    assign(s->above_mode_ctx,      uint8_t *,             16);
    assign(s->above_y_nnz_ctx,     uint8_t *,             16);
    assign(s->above_uv_nnz_ctx[0], uint8_t *,              8);
    assign(s->above_uv_nnz_ctx[1], uint8_t *,              8);
    assign(s->intra_pred_data[0],  uint8_t *,             64);
    assign(s->intra_pred_data[1],  uint8_t *,             32);
    assign(s->intra_pred_data[2],  uint8_t *,             32);
    assign(s->above_segpred_ctx,   uint8_t *,              8);
    assign(s->above_intra_ctx,     uint8_t *,              8);
    assign(s->above_comp_ctx,      uint8_t *,              8);
    assign(s->above_ref_ctx,       uint8_t *,              8);
    assign(s->above_filter_ctx,    uint8_t *,              8);
    assign(s->lflvl,               struct VP9Filter *,     1);
    assign(s->above_mv_ctx,        VP56mv(*)[2],          16);
    assign(s->segmentation_map,    uint8_t *,             64 * s->sb_rows);
    assign(s->mv[0],               struct VP9mvrefPair *, 64 * s->sb_rows);
    assign(s->mv[1],               struct VP9mvrefPair *, 64 * s->sb_rows);
#undef assign

    return 0;
}
// for some reason the sign bit is at the end, not the start, of a bit sequence
static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
{
    int v = get_bits(gb, n);
    return get_bits1(gb) ? -v : v;
}
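// e.g. with n = 4, the bit string 0101 1 decodes as magnitude 5 followed by a
// set sign bit, i.e. -5; 0101 0 decodes as +5.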
static av_always_inline int inv_recenter_nonneg(int v, int m)
{
    return v > 2 * m ? v : v & 1 ? m - ((v + 1) >> 1) : m + (v >> 1);
}
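// Worked example: around m = 5, successive v = 0, 1, 2, 3, 4, ... map to
// 5, 4, 6, 3, 7, ... - small v alternate outward around m, while any
// v > 2 * m is passed through unchanged (such values can only lie on one
// side of m anyway).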
// differential forward probability updates
static int update_prob(VP56RangeCoder *c, int p)
{
    static const int inv_map_table[254] = {
          7,  20,  33,  46,  59,  72,  85,  98, 111, 124, 137, 150, 163, 176,
        189, 202, 215, 228, 241, 254,   1,   2,   3,   4,   5,   6,   8,   9,
         10,  11,  12,  13,  14,  15,  16,  17,  18,  19,  21,  22,  23,  24,
         25,  26,  27,  28,  29,  30,  31,  32,  34,  35,  36,  37,  38,  39,
         40,  41,  42,  43,  44,  45,  47,  48,  49,  50,  51,  52,  53,  54,
         55,  56,  57,  58,  60,  61,  62,  63,  64,  65,  66,  67,  68,  69,
         70,  71,  73,  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,
         86,  87,  88,  89,  90,  91,  92,  93,  94,  95,  96,  97,  99, 100,
        101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 112, 113, 114, 115,
        116, 117, 118, 119, 120, 121, 122, 123, 125, 126, 127, 128, 129, 130,
        131, 132, 133, 134, 135, 136, 138, 139, 140, 141, 142, 143, 144, 145,
        146, 147, 148, 149, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160,
        161, 162, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175,
        177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 190, 191,
        192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 203, 204, 205, 206,
        207, 208, 209, 210, 211, 212, 213, 214, 216, 217, 218, 219, 220, 221,
        222, 223, 224, 225, 226, 227, 229, 230, 231, 232, 233, 234, 235, 236,
        237, 238, 239, 240, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251,
        252, 253,
    };
    int d;
    /* This code performs a differential probability update. For a current
     * probability A in the range [1, 255], the diff to a new probability
     * can take any value in [1-A, 255-A]. Part of this (absolute) range
     * exists on both the positive and the negative side, while the rest
     * exists on one side only. The shared part is coded differentially,
     * i.e. times two with the lowest bit holding the sign, and the
     * one-sided part is coded on top of that. The resulting absolute
     * difference again lies in [0, 254], where a larger value means we
     * moved further away from the original value A; since larger jumps
     * are increasingly unlikely, this can be coded as a VLC. The first 20
     * entries of inv_map_table[] provide 'cheap, rough' updates, whereas
     * the 'fine, exact' updates live further down the range, which adds
     * one extra dimension to this differential update model. */
    if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 0;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 4) + 16;
    } else if (!vp8_rac_get(c)) {
        d = vp8_rac_get_uint(c, 5) + 32;
    } else {
        d = vp8_rac_get_uint(c, 7);
        if (d >= 65)
            d = (d << 1) - 65 + vp8_rac_get(c);
        d += 64;
    }

    return p <= 128 ? 1 + inv_recenter_nonneg(inv_map_table[d], p - 1) :
                    255 - inv_recenter_nonneg(inv_map_table[d], 255 - p);
}
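// The four branches above yield d in [0,16), [16,32), [32,64) and [64,254)
// respectively, with the last branch spending one extra bit only for values
// of 65 and up - i.e. progressively more bits for progressively larger (and
// rarer) update distances.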
static int decode_frame_header(AVCodecContext *ctx,
                               const uint8_t *data, int size, int *ref)
{
    VP9Context *s = ctx->priv_data;
    int c, i, j, k, l, m, n, w, h, max, size2, res, sharp;
    int last_invisible;
    const uint8_t *data2;

    /* general header */
    if ((res = init_get_bits8(&s->gb, data, size)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialize bitstream reader\n");
        return res;
    }
    if (get_bits(&s->gb, 2) != 0x2) { // frame marker
        av_log(ctx, AV_LOG_ERROR, "Invalid frame marker\n");
        return AVERROR_INVALIDDATA;
    }
    s->profile = get_bits1(&s->gb);
    if (get_bits1(&s->gb)) { // reserved bit
        av_log(ctx, AV_LOG_ERROR, "Reserved bit should be zero\n");
        return AVERROR_INVALIDDATA;
    }
    if (get_bits1(&s->gb)) { // show existing frame
        *ref = get_bits(&s->gb, 3);
        return 0;
    }
    s->last_keyframe = s->keyframe;
    s->keyframe      = !get_bits1(&s->gb);
    last_invisible   = s->invisible;
    s->invisible     = !get_bits1(&s->gb);
    s->errorres      = get_bits1(&s->gb);
    // FIXME disable this upon resolution change
    s->use_last_frame_mvs = !s->errorres && !last_invisible;
    if (s->keyframe) {
        if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
            av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n");
            return AVERROR_INVALIDDATA;
        }
        s->colorspace = get_bits(&s->gb, 3);
        if (s->colorspace == 7) { // RGB = profile 1
            av_log(ctx, AV_LOG_ERROR, "RGB not supported in profile 0\n");
            return AVERROR_INVALIDDATA;
        }
        s->fullrange = get_bits1(&s->gb);
        // for profile 1, here follows the subsampling bits
        s->refreshrefmask = 0xff;
        w = get_bits(&s->gb, 16) + 1;
        h = get_bits(&s->gb, 16) + 1;
        if (get_bits1(&s->gb)) // display size
            skip_bits(&s->gb, 32);
    } else {
        s->intraonly = s->invisible ? get_bits1(&s->gb) : 0;
        s->resetctx  = s->errorres ? 0 : get_bits(&s->gb, 2);
        if (s->intraonly) {
            if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
                av_log(ctx, AV_LOG_ERROR, "Invalid sync code\n");
                return AVERROR_INVALIDDATA;
            }
            s->refreshrefmask = get_bits(&s->gb, 8);
            w = get_bits(&s->gb, 16) + 1;
            h = get_bits(&s->gb, 16) + 1;
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
        } else {
            s->refreshrefmask = get_bits(&s->gb, 8);
            s->refidx[0]      = get_bits(&s->gb, 3);
            s->signbias[0]    = get_bits1(&s->gb);
            s->refidx[1]      = get_bits(&s->gb, 3);
            s->signbias[1]    = get_bits1(&s->gb);
            s->refidx[2]      = get_bits(&s->gb, 3);
            s->signbias[2]    = get_bits1(&s->gb);
            if (!s->refs[s->refidx[0]] || !s->refs[s->refidx[1]] ||
                !s->refs[s->refidx[2]]) {
                av_log(ctx, AV_LOG_ERROR, "Not all references are available\n");
                return AVERROR_INVALIDDATA;
            }
            if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[0]]->width;
                h = s->refs[s->refidx[0]]->height;
            } else if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[1]]->width;
                h = s->refs[s->refidx[1]]->height;
            } else if (get_bits1(&s->gb)) {
                w = s->refs[s->refidx[2]]->width;
                h = s->refs[s->refidx[2]]->height;
            } else {
                w = get_bits(&s->gb, 16) + 1;
                h = get_bits(&s->gb, 16) + 1;
            }
            if (get_bits1(&s->gb)) // display size
                skip_bits(&s->gb, 32);
            s->highprecisionmvs = get_bits1(&s->gb);
            s->filtermode = get_bits1(&s->gb) ? FILTER_SWITCHABLE :
                                                get_bits(&s->gb, 2);
            s->allowcompinter = s->signbias[0] != s->signbias[1] ||
                                s->signbias[0] != s->signbias[2];
            if (s->allowcompinter) {
                if (s->signbias[0] == s->signbias[1]) {
                    s->fixcompref    = 2;
                    s->varcompref[0] = 0;
                    s->varcompref[1] = 1;
                } else if (s->signbias[0] == s->signbias[2]) {
                    s->fixcompref    = 1;
                    s->varcompref[0] = 0;
                    s->varcompref[1] = 2;
                } else {
                    s->fixcompref    = 0;
                    s->varcompref[0] = 1;
                    s->varcompref[1] = 2;
                }
            }
        }
    }
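    // Note on the compound-prediction setup above: compound inter prediction
    // pairs two references with opposite sign bias, so the reference whose
    // bias is unique becomes the fixed one (fixcompref) and the two same-bias
    // references remain selectable per block (varcompref[0..1]).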
    s->refreshctx   = s->errorres ? 0 : get_bits1(&s->gb);
    s->parallelmode = s->errorres ? 1 : get_bits1(&s->gb);
    s->framectxid   = c = get_bits(&s->gb, 2);

    /* loopfilter header data */
    s->filter.level = get_bits(&s->gb, 6);
    sharp = get_bits(&s->gb, 3);
    // if sharpness changed, reinit lim/mblim LUTs. if it didn't change,
    // keep the old cache values since they are still valid
    if (s->filter.sharpness != sharp)
        memset(s->filter.lim_lut, 0, sizeof(s->filter.lim_lut));
    s->filter.sharpness = sharp;
    if ((s->lf_delta.enabled = get_bits1(&s->gb))) {
        if (get_bits1(&s->gb)) {
            for (i = 0; i < 4; i++)
                if (get_bits1(&s->gb))
                    s->lf_delta.ref[i] = get_sbits_inv(&s->gb, 6);
            for (i = 0; i < 2; i++)
                if (get_bits1(&s->gb))
                    s->lf_delta.mode[i] = get_sbits_inv(&s->gb, 6);
        }
    } else {
        memset(&s->lf_delta, 0, sizeof(s->lf_delta));
    }
    /* quantization header data */
    s->yac_qi      = get_bits(&s->gb, 8);
    s->ydc_qdelta  = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->uvdc_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->uvac_qdelta = get_bits1(&s->gb) ? get_sbits_inv(&s->gb, 4) : 0;
    s->lossless    = s->yac_qi == 0 && s->ydc_qdelta == 0 &&
                     s->uvdc_qdelta == 0 && s->uvac_qdelta == 0;
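    // A frame is lossless only when the base quantizer index and all three
    // deltas are zero; in that case the 4x4 transform is forced below
    // (s->txfmmode = TX_4X4) and the lossless scan tables are selected via
    // the "4 * s->lossless + b->tx" index in decode_coeffs().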
    /* segmentation header info */
    if ((s->segmentation.enabled = get_bits1(&s->gb))) {
        if ((s->segmentation.update_map = get_bits1(&s->gb))) {
            for (i = 0; i < 7; i++)
                s->prob.seg[i] = get_bits1(&s->gb) ?
                                 get_bits(&s->gb, 8) : 255;
            if ((s->segmentation.temporal = get_bits1(&s->gb)))
                for (i = 0; i < 3; i++)
                    s->prob.segpred[i] = get_bits1(&s->gb) ?
                                         get_bits(&s->gb, 8) : 255;
        }

        if (get_bits1(&s->gb)) {
            s->segmentation.absolute_vals = get_bits1(&s->gb);
            for (i = 0; i < 8; i++) {
                if ((s->segmentation.feat[i].q_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].q_val = get_sbits_inv(&s->gb, 8);
                if ((s->segmentation.feat[i].lf_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].lf_val = get_sbits_inv(&s->gb, 6);
                if ((s->segmentation.feat[i].ref_enabled = get_bits1(&s->gb)))
                    s->segmentation.feat[i].ref_val = get_bits(&s->gb, 2);
                s->segmentation.feat[i].skip_enabled = get_bits1(&s->gb);
            }
        }
    } else {
        s->segmentation.feat[0].q_enabled    = 0;
        s->segmentation.feat[0].lf_enabled   = 0;
        s->segmentation.feat[0].skip_enabled = 0;
        s->segmentation.feat[0].ref_enabled  = 0;
    }
    // set qmul[] based on Y/UV, AC/DC and segmentation Q idx deltas
    for (i = 0; i < (s->segmentation.enabled ? 8 : 1); i++) {
        int qyac, qydc, quvac, quvdc, lflvl, sh;

        if (s->segmentation.feat[i].q_enabled) {
            if (s->segmentation.absolute_vals)
                qyac = s->segmentation.feat[i].q_val;
            else
                qyac = s->yac_qi + s->segmentation.feat[i].q_val;
        } else {
            qyac = s->yac_qi;
        }
        qydc  = av_clip_uintp2(qyac + s->ydc_qdelta, 8);
        quvdc = av_clip_uintp2(qyac + s->uvdc_qdelta, 8);
        quvac = av_clip_uintp2(qyac + s->uvac_qdelta, 8);
        qyac  = av_clip_uintp2(qyac, 8);

        s->segmentation.feat[i].qmul[0][0] = vp9_dc_qlookup[qydc];
        s->segmentation.feat[i].qmul[0][1] = vp9_ac_qlookup[qyac];
        s->segmentation.feat[i].qmul[1][0] = vp9_dc_qlookup[quvdc];
        s->segmentation.feat[i].qmul[1][1] = vp9_ac_qlookup[quvac];

        sh = s->filter.level >= 32;
        if (s->segmentation.feat[i].lf_enabled) {
            if (s->segmentation.absolute_vals)
                lflvl = s->segmentation.feat[i].lf_val;
            else
                lflvl = s->filter.level + s->segmentation.feat[i].lf_val;
        } else {
            lflvl = s->filter.level;
        }
        s->segmentation.feat[i].lflvl[0][0] =
        s->segmentation.feat[i].lflvl[0][1] =
            av_clip_uintp2(lflvl + (s->lf_delta.ref[0] << sh), 6);
        for (j = 1; j < 4; j++) {
            s->segmentation.feat[i].lflvl[j][0] =
                av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
                                         s->lf_delta.mode[0]) << sh), 6);
            s->segmentation.feat[i].lflvl[j][1] =
                av_clip_uintp2(lflvl + ((s->lf_delta.ref[j] +
                                         s->lf_delta.mode[1]) << sh), 6);
        }
    }
    if ((res = update_size(ctx, w, h)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialize decoder for %dx%d\n", w, h);
        return res;
    }
    for (s->tiling.log2_tile_cols = 0;
         (s->sb_cols >> s->tiling.log2_tile_cols) > 64;
         s->tiling.log2_tile_cols++) ;
    for (max = 0; (s->sb_cols >> max) >= 4; max++) ;
    max = FFMAX(0, max - 1);
    while (max > s->tiling.log2_tile_cols) {
        if (get_bits1(&s->gb))
            s->tiling.log2_tile_cols++;
        else
            break;
    }
    s->tiling.log2_tile_rows = decode012(&s->gb);
    s->tiling.tile_rows      = 1 << s->tiling.log2_tile_rows;
    if (s->tiling.tile_cols != (1 << s->tiling.log2_tile_cols)) {
        s->tiling.tile_cols = 1 << s->tiling.log2_tile_cols;
        s->c_b = av_fast_realloc(s->c_b, &s->c_b_size,
                                 sizeof(VP56RangeCoder) * s->tiling.tile_cols);
        if (!s->c_b) {
            av_log(ctx, AV_LOG_ERROR, "Ran out of memory during range coder init\n");
            return AVERROR(ENOMEM);
        }
    }
    if (s->keyframe || s->errorres || s->intraonly) {
        s->prob_ctx[0].p = s->prob_ctx[1].p = s->prob_ctx[2].p =
        s->prob_ctx[3].p = vp9_default_probs;
        memcpy(s->prob_ctx[0].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[1].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[2].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
        memcpy(s->prob_ctx[3].coef, vp9_default_coef_probs,
               sizeof(vp9_default_coef_probs));
    }
    // the next 16 bits give the size of the rest of the header (arith-coded)
    size2 = get_bits(&s->gb, 16);
    data2 = align_get_bits(&s->gb);
    if (size2 > size - (data2 - data)) {
        av_log(ctx, AV_LOG_ERROR, "Invalid compressed header size\n");
        return AVERROR_INVALIDDATA;
    }
    ff_vp56_init_range_decoder(&s->c, data2, size2);
    if (vp56_rac_get_prob_branchy(&s->c, 128)) { // marker bit
        av_log(ctx, AV_LOG_ERROR, "Marker bit was set\n");
        return AVERROR_INVALIDDATA;
    }
    if (s->keyframe || s->intraonly) {
        memset(s->counts.coef, 0, sizeof(s->counts.coef) + sizeof(s->counts.eob));
    } else {
        memset(&s->counts, 0, sizeof(s->counts));
    }
    // FIXME is it faster to not copy here, but to do it in the fw updates
    // below as explicit copies whenever the fw update is absent?
    s->prob.p = s->prob_ctx[c].p;
    // txfm updates
    if (s->lossless) {
        s->txfmmode = TX_4X4;
    } else {
        s->txfmmode = vp8_rac_get_uint(&s->c, 2);
        if (s->txfmmode == 3)
            s->txfmmode += vp8_rac_get(&s->c);

        if (s->txfmmode == TX_SWITCHABLE) {
            for (i = 0; i < 2; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.tx8p[i] = update_prob(&s->c, s->prob.p.tx8p[i]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx16p[i][j] =
                            update_prob(&s->c, s->prob.p.tx16p[i][j]);
            for (i = 0; i < 2; i++)
                for (j = 0; j < 3; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.tx32p[i][j] =
                            update_prob(&s->c, s->prob.p.tx32p[i][j]);
        }
    }
    // coef updates
    for (i = 0; i < 4; i++) {
        uint8_t (*ref)[2][6][6][3] = s->prob_ctx[c].coef[i];
        if (vp8_rac_get(&s->c)) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m >= 3 && l == 0) // dc only has 3 pt
                                break;
                            for (n = 0; n < 3; n++) {
                                if (vp56_rac_get_prob_branchy(&s->c, 252)) {
                                    p[n] = update_prob(&s->c, r[n]);
                                } else {
                                    p[n] = r[n];
                                }
                            }
                            p[3] = 0;
                        }
        } else {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 2; k++)
                    for (l = 0; l < 6; l++)
                        for (m = 0; m < 6; m++) {
                            uint8_t *p = s->prob.coef[i][j][k][l][m];
                            uint8_t *r = ref[j][k][l][m];
                            if (m > 3 && l == 0) // dc only has 3 pt
                                break;
                            memcpy(p, r, 3);
                            p[3] = 0;
                        }
        }
        if (s->txfmmode == i)
            break;
    }
    // mode updates
    for (i = 0; i < 3; i++)
        if (vp56_rac_get_prob_branchy(&s->c, 252))
            s->prob.p.skip[i] = update_prob(&s->c, s->prob.p.skip[i]);
    if (!s->keyframe && !s->intraonly) {
        for (i = 0; i < 7; i++)
            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_mode[i][j] =
                        update_prob(&s->c, s->prob.p.mv_mode[i][j]);

        if (s->filtermode == FILTER_SWITCHABLE)
            for (i = 0; i < 4; i++)
                for (j = 0; j < 2; j++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.filter[i][j] =
                            update_prob(&s->c, s->prob.p.filter[i][j]);

        for (i = 0; i < 4; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.intra[i] = update_prob(&s->c, s->prob.p.intra[i]);
        if (s->allowcompinter) {
            s->comppredmode = vp8_rac_get(&s->c);
            if (s->comppredmode)
                s->comppredmode += vp8_rac_get(&s->c);
            if (s->comppredmode == PRED_SWITCHABLE)
                for (i = 0; i < 5; i++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.comp[i] =
                            update_prob(&s->c, s->prob.p.comp[i]);
        } else {
            s->comppredmode = PRED_SINGLEREF;
        }
        if (s->comppredmode != PRED_COMPREF) {
            for (i = 0; i < 5; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][0] =
                        update_prob(&s->c, s->prob.p.single_ref[i][0]);
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.single_ref[i][1] =
                        update_prob(&s->c, s->prob.p.single_ref[i][1]);
            }
        }

        if (s->comppredmode != PRED_SINGLEREF) {
            for (i = 0; i < 5; i++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.comp_ref[i] =
                        update_prob(&s->c, s->prob.p.comp_ref[i]);
        }
        for (i = 0; i < 4; i++)
            for (j = 0; j < 9; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.y_mode[i][j] =
                        update_prob(&s->c, s->prob.p.y_mode[i][j]);

        for (i = 0; i < 4; i++)
            for (j = 0; j < 4; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.partition[3 - i][j][k] =
                            update_prob(&s->c, s->prob.p.partition[3 - i][j][k]);
        // mv fields don't use the update_prob subexp model for some reason
        for (i = 0; i < 3; i++)
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_joint[i] = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

        for (i = 0; i < 2; i++) {
            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].sign = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].classes[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            if (vp56_rac_get_prob_branchy(&s->c, 252))
                s->prob.p.mv_comp[i].class0 = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 10; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].bits[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        for (i = 0; i < 2; i++) {
            for (j = 0; j < 2; j++)
                for (k = 0; k < 3; k++)
                    if (vp56_rac_get_prob_branchy(&s->c, 252))
                        s->prob.p.mv_comp[i].class0_fp[j][k] =
                            (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

            for (j = 0; j < 3; j++)
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].fp[j] =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
        }

        if (s->highprecisionmvs) {
            for (i = 0; i < 2; i++) {
                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].class0_hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;

                if (vp56_rac_get_prob_branchy(&s->c, 252))
                    s->prob.p.mv_comp[i].hp =
                        (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
            }
        }
    }
    return (data2 - data) + size2;
}
static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
                                      VP9Context *s)
{
    dst->x = av_clip(src->x, s->min_mv.x, s->max_mv.x);
    dst->y = av_clip(src->y, s->min_mv.y, s->max_mv.y);
}
static void find_ref_mvs(VP9Context *s,
                         VP56mv *pmv, int ref, int z, int idx, int sb)
{
    static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
        [BS_64x64] = {{  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
                      { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 }},
        [BS_64x32] = {{  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 }},
        [BS_32x64] = {{ -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
                      { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 }},
        [BS_32x32] = {{  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_32x16] = {{  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
                      { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x32] = {{ -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
                      {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 }},
        [BS_16x16] = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
        [BS_16x8]  = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
                      {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 }},
        [BS_8x16]  = {{ -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
                      { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 }},
        [BS_8x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_8x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
        [BS_4x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
    };
    VP9Block *const b = &s->b;
    int row = b->row, col = b->col, row7 = b->row7;
    const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
#define INVALID_MV 0x80008000U
    uint32_t mem = INVALID_MV;
    int i;
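    // mem holds the first distinct candidate MV found so far, packed as a
    // 32-bit value, with INVALID_MV as the "none yet" sentinel. The RETURN_*
    // macros below return immediately on the first candidate when the best
    // (idx == 0) MV is wanted, or on the second distinct candidate for
    // idx == 1.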
#define RETURN_DIRECT_MV(mv) \
    do { \
        uint32_t m = AV_RN32A(&mv); \
        if (!idx) { \
            AV_WN32A(pmv, m); \
            return; \
        } else if (mem == INVALID_MV) { \
            mem = m; \
        } else if (m != mem) { \
            AV_WN32A(pmv, m); \
            return; \
        } \
    } while (0)
    if (sb >= 0) {
        if (sb == 2 || sb == 1) {
            RETURN_DIRECT_MV(b->mv[0][z]);
        } else if (sb == 3) {
            RETURN_DIRECT_MV(b->mv[2][z]);
            RETURN_DIRECT_MV(b->mv[1][z]);
            RETURN_DIRECT_MV(b->mv[0][z]);
        }
#define RETURN_MV(mv) \
    do { \
        if (sb > 0) { \
            VP56mv tmp; \
            uint32_t m; \
            clamp_mv(&tmp, &mv, s); \
            m = AV_RN32A(&tmp); \
            if (!idx) { \
                AV_WN32A(pmv, m); \
                return; \
            } else if (mem == INVALID_MV) { \
                mem = m; \
            } else if (m != mem) { \
                AV_WN32A(pmv, m); \
                return; \
            } \
        } else { \
            uint32_t m = AV_RN32A(&mv); \
            if (!idx) { \
                clamp_mv(pmv, &mv, s); \
                return; \
            } else if (mem == INVALID_MV) { \
                mem = m; \
            } else if (m != mem) { \
                clamp_mv(pmv, &mv, s); \
                return; \
            } \
        } \
    } while (0)
        if (row > 0) {
            struct VP9mvrefPair *mv = &s->mv[0][(row - 1) * s->sb_cols * 8 + col];

            if (mv->ref[0] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
            }
        }
        if (col > s->tiling.tile_col_start) {
            struct VP9mvrefPair *mv = &s->mv[0][row * s->sb_cols * 8 + col - 1];

            if (mv->ref[0] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
            }
        }
        i = 2;
    } else {
        i = 0;
    }
    // previously coded MVs in this neighbourhood, using same reference frame
    for (; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tiling.tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->mv[0][r * s->sb_cols * 8 + c];

            if (mv->ref[0] == ref) {
                RETURN_MV(mv->mv[0]);
            } else if (mv->ref[1] == ref) {
                RETURN_MV(mv->mv[1]);
            }
        }
    }
    // MV at this position in previous frame, using same reference frame
    if (s->use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->mv[1][row * s->sb_cols * 8 + col];

        if (mv->ref[0] == ref) {
            RETURN_MV(mv->mv[0]);
        } else if (mv->ref[1] == ref) {
            RETURN_MV(mv->mv[1]);
        }
    }
#define RETURN_SCALE_MV(mv, scale) \
    do { \
        if (scale) { \
            VP56mv mv_temp = { -mv.x, -mv.y }; \
            RETURN_MV(mv_temp); \
        } else { \
            RETURN_MV(mv); \
        } \
    } while (0)
    // previously coded MVs in this neighbourhood, using different reference frame
    for (i = 0; i < 8; i++) {
        int c = p[i][0] + col, r = p[i][1] + row;

        if (c >= s->tiling.tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
            struct VP9mvrefPair *mv = &s->mv[0][r * s->sb_cols * 8 + c];

            if (mv->ref[0] != ref && mv->ref[0] >= 0) {
                RETURN_SCALE_MV(mv->mv[0], s->signbias[mv->ref[0]] != s->signbias[ref]);
            }
            if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
                // BUG - libvpx has this condition regardless of whether
                // we used the first ref MV and pre-scaling
                AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
                RETURN_SCALE_MV(mv->mv[1], s->signbias[mv->ref[1]] != s->signbias[ref]);
            }
        }
    }
    // MV at this position in previous frame, using different reference frame
    if (s->use_last_frame_mvs) {
        struct VP9mvrefPair *mv = &s->mv[1][row * s->sb_cols * 8 + col];

        if (mv->ref[0] != ref && mv->ref[0] >= 0) {
            RETURN_SCALE_MV(mv->mv[0], s->signbias[mv->ref[0]] != s->signbias[ref]);
        }
        if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
            // BUG - libvpx has this condition regardless of whether
            // we used the first ref MV and pre-scaling
            AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
            RETURN_SCALE_MV(mv->mv[1], s->signbias[mv->ref[1]] != s->signbias[ref]);
        }
    }

    // no usable candidate found; predict a zero MV
    AV_ZERO32(pmv);
#undef INVALID_MV
#undef RETURN_DIRECT_MV
#undef RETURN_MV
#undef RETURN_SCALE_MV
}
static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
{
    int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
    int n, c = vp8_rac_get_tree(&s->c, vp9_mv_class_tree,
                                s->prob.p.mv_comp[idx].classes);

    s->counts.mv_comp[idx].sign[sign]++;
    s->counts.mv_comp[idx].classes[c]++;
    if (c) {
        int m;

        for (n = 0, m = 0; m < c; m++) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
            n |= bit << m;
            s->counts.mv_comp[idx].bits[m][bit]++;
        }
        n <<= 3;
        bit = vp8_rac_get_tree(&s->c, vp9_mv_fp_tree, s->prob.p.mv_comp[idx].fp);
        n |= bit << 1;
        s->counts.mv_comp[idx].fp[bit]++;
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
            s->counts.mv_comp[idx].hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].hp[1]++;
        }
    } else {
        n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
        s->counts.mv_comp[idx].class0[n]++;
        bit = vp8_rac_get_tree(&s->c, vp9_mv_fp_tree,
                               s->prob.p.mv_comp[idx].class0_fp[n]);
        s->counts.mv_comp[idx].class0_fp[n][bit]++;
        n = (n << 3) | (bit << 1);
        if (hp) {
            bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
            s->counts.mv_comp[idx].class0_hp[bit]++;
            n |= bit;
        } else {
            n |= 1;
            // bug in libvpx - we count for bw entropy purposes even if the
            // bit wasn't coded
            s->counts.mv_comp[idx].class0_hp[1]++;
        }
    }

    return sign ? -(n + 1) : (n + 1);
}
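// The decoded magnitude is n + 1: for class 0, n packs the single class0 bit,
// the two fractional (1/8-pel) bits and the high-precision bit as
// (b << 3) | (fp << 1) | hp; for larger classes, the class's integer offset
// bits occupy the top positions instead. When hp is not coded, it is forced
// to 1 above.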
static void fill_mv(VP9Context *s,
                    VP56mv *mv, int mode, int sb)
{
    VP9Block *const b = &s->b;

    if (mode == ZEROMV) {
        memset(mv, 0, sizeof(*mv) * 2);
    } else {
        int hp;

        // FIXME cache this value and reuse for other subblocks
        find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
                     mode == NEWMV ? -1 : sb);
        // FIXME maybe move this code into find_ref_mvs()
        if ((mode == NEWMV || sb == -1) &&
            !(hp = s->highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
            // round odd (1/8-pel) components to even values, toward zero,
            // since high-precision MVs are not in use for this block
            if (mv[0].y & 1) {
                if (mv[0].y < 0)
                    mv[0].y++;
                else
                    mv[0].y--;
            }
            if (mv[0].x & 1) {
                if (mv[0].x < 0)
                    mv[0].x++;
                else
                    mv[0].x--;
            }
        }
        if (mode == NEWMV) {
            enum MVJoint j = vp8_rac_get_tree(&s->c, vp9_mv_joint_tree,
                                              s->prob.p.mv_joint);

            s->counts.mv_joint[j]++;
            if (j >= MV_JOINT_V)
                mv[0].y += read_mv_component(s, 0, hp);
            if (j & 1)
                mv[0].x += read_mv_component(s, 1, hp);
        }

        if (b->comp) {
            // FIXME cache this value and reuse for other subblocks
            find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
                         mode == NEWMV ? -1 : sb);
            if ((mode == NEWMV || sb == -1) &&
                !(hp = s->highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
                if (mv[1].y & 1) {
                    if (mv[1].y < 0)
                        mv[1].y++;
                    else
                        mv[1].y--;
                }
                if (mv[1].x & 1) {
                    if (mv[1].x < 0)
                        mv[1].x++;
                    else
                        mv[1].x--;
                }
            }
            if (mode == NEWMV) {
                enum MVJoint j = vp8_rac_get_tree(&s->c, vp9_mv_joint_tree,
                                                  s->prob.p.mv_joint);

                s->counts.mv_joint[j]++;
                if (j >= MV_JOINT_V)
                    mv[1].y += read_mv_component(s, 0, hp);
                if (j & 1)
                    mv[1].x += read_mv_component(s, 1, hp);
            }
        }
    }
}
static void decode_mode(AVCodecContext *ctx)
{
    static const uint8_t left_ctx[N_BS_SIZES] = {
        0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
    };
    static const uint8_t above_ctx[N_BS_SIZES] = {
        0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
    };
    static const uint8_t max_tx_for_bl_bp[N_BS_SIZES] = {
        TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16,
        TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4
    };
    VP9Context *s = ctx->priv_data;
    VP9Block *const b = &s->b;
    int row = b->row, col = b->col, row7 = b->row7;
    enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
    int w4 = FFMIN(s->cols - col, bwh_tab[1][b->bs][0]);
    int h4 = FFMIN(s->rows - row, bwh_tab[1][b->bs][1]), y;
    int have_a = row > 0, have_l = col > s->tiling.tile_col_start;
    if (!s->segmentation.enabled) {
        b->seg_id = 0;
    } else if (s->keyframe || s->intraonly) {
        b->seg_id = s->segmentation.update_map ?
            vp8_rac_get_tree(&s->c, vp9_segmentation_tree, s->prob.seg) : 0;
    } else if (!s->segmentation.update_map ||
               (s->segmentation.temporal &&
                vp56_rac_get_prob_branchy(&s->c,
                    s->prob.segpred[s->above_segpred_ctx[col] +
                                    s->left_segpred_ctx[row7]]))) {
        int pred = 8, x;

        for (y = 0; y < h4; y++)
            for (x = 0; x < w4; x++)
                pred = FFMIN(pred, s->segmentation_map[(y + row) * 8 * s->sb_cols + x + col]);
        av_assert1(pred < 8);
        b->seg_id = pred;

        memset(&s->above_segpred_ctx[col], 1, w4);
        memset(&s->left_segpred_ctx[row7], 1, h4);
    } else {
        b->seg_id = vp8_rac_get_tree(&s->c, vp9_segmentation_tree,
                                     s->prob.seg);

        memset(&s->above_segpred_ctx[col], 0, w4);
        memset(&s->left_segpred_ctx[row7], 0, h4);
    }
    if ((s->segmentation.enabled && s->segmentation.update_map) || s->keyframe) {
        for (y = 0; y < h4; y++)
            memset(&s->segmentation_map[(y + row) * 8 * s->sb_cols + col],
                   b->seg_id, w4);
    }
    b->skip = s->segmentation.enabled &&
              s->segmentation.feat[b->seg_id].skip_enabled;
    if (!b->skip) {
        int c = s->left_skip_ctx[row7] + s->above_skip_ctx[col];
        b->skip = vp56_rac_get_prob(&s->c, s->prob.p.skip[c]);
        s->counts.skip[c][b->skip]++;
    }
    if (s->keyframe || s->intraonly) {
        b->intra = 1;
    } else if (s->segmentation.feat[b->seg_id].ref_enabled) {
        b->intra = !s->segmentation.feat[b->seg_id].ref_val;
    } else {
        int c, bit;

        if (have_a && have_l) {
            c = s->above_intra_ctx[col] + s->left_intra_ctx[row7];
            c += (c == 2);
        } else {
            c = have_a ? 2 * s->above_intra_ctx[col] :
                have_l ? 2 * s->left_intra_ctx[row7] : 0;
        }
        bit = vp56_rac_get_prob(&s->c, s->prob.p.intra[c]);
        s->counts.intra[c][bit]++;
        b->intra = !bit;
    }
    if ((b->intra || !b->skip) && s->txfmmode == TX_SWITCHABLE) {
        int c;

        if (have_a) {
            if (have_l) {
                c = (s->above_skip_ctx[col] ? max_tx :
                     s->above_txfm_ctx[col]) +
                    (s->left_skip_ctx[row7] ? max_tx :
                     s->left_txfm_ctx[row7]) > max_tx;
            } else {
                c = s->above_skip_ctx[col] ? 1 :
                    (s->above_txfm_ctx[col] * 2 > max_tx);
            }
        } else if (have_l) {
            c = s->left_skip_ctx[row7] ? 1 :
                (s->left_txfm_ctx[row7] * 2 > max_tx);
        } else {
            c = 1;
        }
        switch (max_tx) {
        case TX_32X32:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][0]);
            if (b->tx)
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][1]);
            if (b->tx == 2)
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx32p[c][2]);
            s->counts.tx32p[c][b->tx]++;
            break;
        case TX_16X16:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][0]);
            if (b->tx)
                b->tx += vp56_rac_get_prob(&s->c, s->prob.p.tx16p[c][1]);
            s->counts.tx16p[c][b->tx]++;
            break;
        case TX_8X8:
            b->tx = vp56_rac_get_prob(&s->c, s->prob.p.tx8p[c]);
            s->counts.tx8p[c][b->tx]++;
            break;
        case TX_4X4:
            b->tx = TX_4X4;
            break;
        }
    } else {
        b->tx = FFMIN(max_tx, s->txfmmode);
    }
    if (s->keyframe || s->intraonly) {
        uint8_t *a = &s->above_mode_ctx[col * 2];
        uint8_t *l = &s->left_mode_ctx[(row7) << 1];

        b->comp = 0;
        if (b->bs > BS_8x8) {
            // FIXME the memory storage intermediates here aren't really
            // necessary, they're just there to make the code slightly
            // simpler for now
            b->mode[0] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                    vp9_default_kf_ymode_probs[a[0]][l[0]]);
            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                 vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
                l[0] = a[1] = b->mode[1];
            } else {
                l[0] = a[1] = b->mode[1] = b->mode[0];
            }
            if (b->bs != BS_4x8) {
                b->mode[2] = a[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                        vp9_default_kf_ymode_probs[a[0]][l[1]]);
                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                     vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
                    l[1] = a[1] = b->mode[3];
                } else {
                    l[1] = a[1] = b->mode[3] = b->mode[2];
                }
            } else {
                b->mode[2] = b->mode[0];
                l[1] = a[1] = b->mode[3] = b->mode[1];
            }
        } else {
            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          vp9_default_kf_ymode_probs[*a][*l]);
            b->mode[3] = b->mode[2] = b->mode[1] = b->mode[0];
            // FIXME this can probably be optimized
            memset(a, b->mode[0], bwh_tab[0][b->bs][0]);
            memset(l, b->mode[0], bwh_tab[0][b->bs][1]);
        }
        b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                     vp9_default_kf_uvmode_probs[b->mode[3]]);
    } else if (b->intra) {
        b->comp = 0;
        if (b->bs > BS_8x8) {
            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          s->prob.p.y_mode[0]);
            s->counts.y_mode[0][b->mode[0]]++;
            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                              s->prob.p.y_mode[0]);
                s->counts.y_mode[0][b->mode[1]]++;
            } else {
                b->mode[1] = b->mode[0];
            }
            if (b->bs != BS_4x8) {
                b->mode[2] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                              s->prob.p.y_mode[0]);
                s->counts.y_mode[0][b->mode[2]]++;
                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                                  s->prob.p.y_mode[0]);
                    s->counts.y_mode[0][b->mode[3]]++;
                } else {
                    b->mode[3] = b->mode[2];
                }
            } else {
                b->mode[2] = b->mode[0];
                b->mode[3] = b->mode[1];
            }
        } else {
            static const uint8_t size_group[10] = {
                3, 3, 3, 3, 2, 2, 2, 1, 1, 1
            };
            int sz = size_group[b->bs];

            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                          s->prob.p.y_mode[sz]);
            b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
            s->counts.y_mode[sz][b->mode[3]]++;
        }
        b->uvmode = vp8_rac_get_tree(&s->c, vp9_intramode_tree,
                                     s->prob.p.uv_mode[b->mode[3]]);
        s->counts.uv_mode[b->mode[3]][b->uvmode]++;
    } else {
        static const uint8_t inter_mode_ctx_lut[14][14] = {
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
            { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
        };
        if (s->segmentation.feat[b->seg_id].ref_enabled) {
            av_assert2(s->segmentation.feat[b->seg_id].ref_val != 0);
            b->comp = 0;
            b->ref[0] = s->segmentation.feat[b->seg_id].ref_val - 1;
        } else {
            // read comp_pred flag
            if (s->comppredmode != PRED_SWITCHABLE) {
                b->comp = s->comppredmode == PRED_COMPREF;
            } else {
                int c;

                // FIXME add intra as ref=0xff (or -1) to make these easier?
                if (have_a) {
                    if (have_l) {
                        if (s->above_comp_ctx[col] && s->left_comp_ctx[row7]) {
                            c = 4;
                        } else if (s->above_comp_ctx[col]) {
                            c = 2 + (s->left_intra_ctx[row7] ||
                                     s->left_ref_ctx[row7] == s->fixcompref);
                        } else if (s->left_comp_ctx[row7]) {
                            c = 2 + (s->above_intra_ctx[col] ||
                                     s->above_ref_ctx[col] == s->fixcompref);
                        } else {
                            c = (!s->above_intra_ctx[col] &&
                                 s->above_ref_ctx[col] == s->fixcompref) ^
                                (!s->left_intra_ctx[row7] &&
                                 s->left_ref_ctx[row & 7] == s->fixcompref);
                        }
                    } else {
                        c = s->above_comp_ctx[col] ? 3 :
                            (!s->above_intra_ctx[col] && s->above_ref_ctx[col] == s->fixcompref);
                    }
                } else if (have_l) {
                    c = s->left_comp_ctx[row7] ? 3 :
                        (!s->left_intra_ctx[row7] && s->left_ref_ctx[row7] == s->fixcompref);
                } else {
                    c = 1;
                }
                b->comp = vp56_rac_get_prob(&s->c, s->prob.p.comp[c]);
                s->counts.comp[c][b->comp]++;
            }
            // read actual references
            // FIXME probably cache a few variables here to prevent repetitive
            // memory accesses below
            if (b->comp) /* two references */ {
                int fix_idx = s->signbias[s->fixcompref], var_idx = !fix_idx, c, bit;

                b->ref[fix_idx] = s->fixcompref;
                // FIXME can this codeblob be replaced by some sort of LUT?
                if (have_a) {
                    if (have_l) {
                        if (s->above_intra_ctx[col]) {
                            if (s->left_intra_ctx[row7]) {
                                c = 2;
                            } else {
                                c = 1 + 2 * (s->left_ref_ctx[row7] != s->varcompref[1]);
                            }
                        } else if (s->left_intra_ctx[row7]) {
                            c = 1 + 2 * (s->above_ref_ctx[col] != s->varcompref[1]);
                        } else {
                            int refl = s->left_ref_ctx[row7], refa = s->above_ref_ctx[col];

                            if (refl == refa && refa == s->varcompref[1]) {
                                c = 0;
                            } else if (!s->left_comp_ctx[row7] && !s->above_comp_ctx[col]) {
                                if ((refa == s->fixcompref && refl == s->varcompref[0]) ||
                                    (refl == s->fixcompref && refa == s->varcompref[0])) {
                                    c = 4;
                                } else {
                                    c = (refa == refl) ? 3 : 1;
                                }
                            } else if (!s->left_comp_ctx[row7]) {
                                if (refa == s->varcompref[1] && refl != s->varcompref[1]) {
                                    c = 1;
                                } else {
                                    c = (refl == s->varcompref[1] &&
                                         refa != s->varcompref[1]) ? 2 : 4;
                                }
                            } else if (!s->above_comp_ctx[col]) {
                                if (refl == s->varcompref[1] && refa != s->varcompref[1]) {
                                    c = 1;
                                } else {
                                    c = (refa == s->varcompref[1] &&
                                         refl != s->varcompref[1]) ? 2 : 4;
                                }
                            } else {
                                c = (refl == refa) ? 4 : 2;
                            }
                        }
                    } else {
                        if (s->above_intra_ctx[col]) {
                            c = 2;
                        } else if (s->above_comp_ctx[col]) {
                            c = 4 * (s->above_ref_ctx[col] != s->varcompref[1]);
                        } else {
                            c = 3 * (s->above_ref_ctx[col] != s->varcompref[1]);
                        }
                    }
                } else if (have_l) {
                    if (s->left_intra_ctx[row7]) {
                        c = 2;
                    } else if (s->left_comp_ctx[row7]) {
                        c = 4 * (s->left_ref_ctx[row7] != s->varcompref[1]);
                    } else {
                        c = 3 * (s->left_ref_ctx[row7] != s->varcompref[1]);
                    }
                } else {
                    c = 2;
                }
                bit = vp56_rac_get_prob(&s->c, s->prob.p.comp_ref[c]);
                b->ref[var_idx] = s->varcompref[bit];
                s->counts.comp_ref[c][bit]++;
            } else /* single reference */ {
                int bit, c;

                if (have_a && !s->above_intra_ctx[col]) {
                    if (have_l && !s->left_intra_ctx[row7]) {
                        if (s->left_comp_ctx[row7]) {
                            if (s->above_comp_ctx[col]) {
                                c = 1 + (!s->fixcompref || !s->left_ref_ctx[row7] ||
                                         !s->above_ref_ctx[col]);
                            } else {
                                c = (3 * !s->above_ref_ctx[col]) +
                                    (!s->fixcompref || !s->left_ref_ctx[row7]);
                            }
                        } else if (s->above_comp_ctx[col]) {
                            c = (3 * !s->left_ref_ctx[row7]) +
                                (!s->fixcompref || !s->above_ref_ctx[col]);
                        } else {
                            c = 2 * !s->left_ref_ctx[row7] + 2 * !s->above_ref_ctx[col];
                        }
                    } else if (s->above_intra_ctx[col]) {
                        c = 2;
                    } else if (s->above_comp_ctx[col]) {
                        c = 1 + (!s->fixcompref || !s->above_ref_ctx[col]);
                    } else {
                        c = 4 * (!s->above_ref_ctx[col]);
                    }
                } else if (have_l && !s->left_intra_ctx[row7]) {
                    if (s->left_intra_ctx[row7]) {
                        c = 2;
                    } else if (s->left_comp_ctx[row7]) {
                        c = 1 + (!s->fixcompref || !s->left_ref_ctx[row7]);
                    } else {
                        c = 4 * (!s->left_ref_ctx[row7]);
                    }
                } else {
                    c = 2;
                }
                bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][0]);
                s->counts.single_ref[c][0][bit]++;
                if (!bit) {
                    b->ref[0] = 0;
                } else {
                    // FIXME can this codeblob be replaced by some sort of LUT?
                    if (have_a) {
                        if (have_l) {
                            if (s->left_intra_ctx[row7]) {
                                if (s->above_intra_ctx[col]) {
                                    c = 2;
                                } else if (s->above_comp_ctx[col]) {
                                    c = 1 + 2 * (s->fixcompref == 1 ||
                                                 s->above_ref_ctx[col] == 1);
                                } else if (!s->above_ref_ctx[col]) {
                                    c = 3;
                                } else {
                                    c = 4 * (s->above_ref_ctx[col] == 1);
                                }
                            } else if (s->above_intra_ctx[col]) {
                                if (s->left_intra_ctx[row7]) {
                                    c = 2;
                                } else if (s->left_comp_ctx[row7]) {
                                    c = 1 + 2 * (s->fixcompref == 1 ||
                                                 s->left_ref_ctx[row7] == 1);
                                } else if (!s->left_ref_ctx[row7]) {
                                    c = 3;
                                } else {
                                    c = 4 * (s->left_ref_ctx[row7] == 1);
                                }
                            } else if (s->above_comp_ctx[col]) {
                                if (s->left_comp_ctx[row7]) {
                                    if (s->left_ref_ctx[row7] == s->above_ref_ctx[col]) {
                                        c = 3 * (s->fixcompref == 1 ||
                                                 s->left_ref_ctx[row7] == 1);
                                    } else {
                                        c = 2;
                                    }
                                } else if (!s->left_ref_ctx[row7]) {
                                    c = 1 + 2 * (s->fixcompref == 1 ||
                                                 s->above_ref_ctx[col] == 1);
                                } else {
                                    c = 3 * (s->left_ref_ctx[row7] == 1) +
                                        (s->fixcompref == 1 || s->above_ref_ctx[col] == 1);
                                }
                            } else if (s->left_comp_ctx[row7]) {
                                if (!s->above_ref_ctx[col]) {
                                    c = 1 + 2 * (s->fixcompref == 1 ||
                                                 s->left_ref_ctx[row7] == 1);
                                } else {
                                    c = 3 * (s->above_ref_ctx[col] == 1) +
                                        (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1);
                                }
                            } else if (!s->above_ref_ctx[col]) {
                                if (!s->left_ref_ctx[row7]) {
                                    c = 3;
                                } else {
                                    c = 4 * (s->left_ref_ctx[row7] == 1);
                                }
                            } else if (!s->left_ref_ctx[row7]) {
                                c = 4 * (s->above_ref_ctx[col] == 1);
                            } else {
                                c = 2 * (s->left_ref_ctx[row7] == 1) +
                                    2 * (s->above_ref_ctx[col] == 1);
                            }
                        } else {
                            if (s->above_intra_ctx[col] ||
                                (!s->above_comp_ctx[col] && !s->above_ref_ctx[col])) {
                                c = 2;
                            } else if (s->above_comp_ctx[col]) {
                                c = 3 * (s->fixcompref == 1 || s->above_ref_ctx[col] == 1);
                            } else {
                                c = 4 * (s->above_ref_ctx[col] == 1);
                            }
                        }
                    } else if (have_l) {
                        if (s->left_intra_ctx[row7] ||
                            (!s->left_comp_ctx[row7] && !s->left_ref_ctx[row7])) {
                            c = 2;
                        } else if (s->left_comp_ctx[row7]) {
                            c = 3 * (s->fixcompref == 1 || s->left_ref_ctx[row7] == 1);
                        } else {
                            c = 4 * (s->left_ref_ctx[row7] == 1);
                        }
                    } else {
                        c = 2;
                    }
                    bit = vp56_rac_get_prob(&s->c, s->prob.p.single_ref[c][1]);
                    s->counts.single_ref[c][1][bit]++;
                    b->ref[0] = 1 + bit;
                }
            }
        }
        if (b->bs <= BS_8x8) {
            if (s->segmentation.feat[b->seg_id].skip_enabled) {
                b->mode[0] = b->mode[1] = b->mode[2] = b->mode[3] = ZEROMV;
            } else {
                static const uint8_t off[10] = {
                    3, 0, 0, 1, 0, 0, 0, 0, 0, 0
                };

                // FIXME this needs to use the LUT tables from find_ref_mvs
                // because not all are -1,0/0,-1
                int c = inter_mode_ctx_lut[s->above_mode_ctx[col + off[b->bs]]]
                                          [s->left_mode_ctx[row7 + off[b->bs]]];

                b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
                                              s->prob.p.mv_mode[c]);
                b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
                s->counts.mv_mode[c][b->mode[0] - 10]++;
            }
        }
        if (s->filtermode == FILTER_SWITCHABLE) {
            int c;

            if (have_a && s->above_mode_ctx[col] >= NEARESTMV) {
                if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
                    c = s->above_filter_ctx[col] == s->left_filter_ctx[row7] ?
                        s->left_filter_ctx[row7] : 3;
                } else {
                    c = s->above_filter_ctx[col];
                }
            } else if (have_l && s->left_mode_ctx[row7] >= NEARESTMV) {
                c = s->left_filter_ctx[row7];
            } else {
                c = 3;
            }

            b->filter = vp8_rac_get_tree(&s->c, vp9_filter_tree,
                                         s->prob.p.filter[c]);
            s->counts.filter[c][b->filter]++;
        } else {
            b->filter = s->filtermode;
        }
        if (b->bs > BS_8x8) {
            int c = inter_mode_ctx_lut[s->above_mode_ctx[col]][s->left_mode_ctx[row7]];

            b->mode[0] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
                                          s->prob.p.mv_mode[c]);
            s->counts.mv_mode[c][b->mode[0] - 10]++;
            fill_mv(s, b->mv[0], b->mode[0], 0);

            if (b->bs != BS_8x4) {
                b->mode[1] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
                                              s->prob.p.mv_mode[c]);
                s->counts.mv_mode[c][b->mode[1] - 10]++;
                fill_mv(s, b->mv[1], b->mode[1], 1);
            } else {
                b->mode[1] = b->mode[0];
                AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
                AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
            }

            if (b->bs != BS_4x8) {
                b->mode[2] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
                                              s->prob.p.mv_mode[c]);
                s->counts.mv_mode[c][b->mode[2] - 10]++;
                fill_mv(s, b->mv[2], b->mode[2], 2);

                if (b->bs != BS_8x4) {
                    b->mode[3] = vp8_rac_get_tree(&s->c, vp9_inter_mode_tree,
                                                  s->prob.p.mv_mode[c]);
                    s->counts.mv_mode[c][b->mode[3] - 10]++;
                    fill_mv(s, b->mv[3], b->mode[3], 3);
                } else {
                    b->mode[3] = b->mode[2];
                    AV_COPY32(&b->mv[3][0], &b->mv[2][0]);
                    AV_COPY32(&b->mv[3][1], &b->mv[2][1]);
                }
            } else {
                b->mode[2] = b->mode[0];
                AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
                AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
                b->mode[3] = b->mode[1];
                AV_COPY32(&b->mv[3][0], &b->mv[1][0]);
                AV_COPY32(&b->mv[3][1], &b->mv[1][1]);
            }
        } else {
            fill_mv(s, b->mv[0], b->mode[0], -1);
            AV_COPY32(&b->mv[1][0], &b->mv[0][0]);
            AV_COPY32(&b->mv[2][0], &b->mv[0][0]);
            AV_COPY32(&b->mv[3][0], &b->mv[0][0]);
            AV_COPY32(&b->mv[1][1], &b->mv[0][1]);
            AV_COPY32(&b->mv[2][1], &b->mv[0][1]);
            AV_COPY32(&b->mv[3][1], &b->mv[0][1]);
        }
    }
    // FIXME this can probably be optimized
    memset(&s->above_skip_ctx[col], b->skip, w4);
    memset(&s->left_skip_ctx[row7], b->skip, h4);
    memset(&s->above_txfm_ctx[col], b->tx, w4);
    memset(&s->left_txfm_ctx[row7], b->tx, h4);
    memset(&s->above_partition_ctx[col], above_ctx[b->bs], w4);
    memset(&s->left_partition_ctx[row7], left_ctx[b->bs], h4);
    if (!s->keyframe && !s->intraonly) {
        memset(&s->above_intra_ctx[col], b->intra, w4);
        memset(&s->left_intra_ctx[row7], b->intra, h4);
        memset(&s->above_comp_ctx[col], b->comp, w4);
        memset(&s->left_comp_ctx[row7], b->comp, h4);
        memset(&s->above_mode_ctx[col], b->mode[3], w4);
        memset(&s->left_mode_ctx[row7], b->mode[3], h4);
        if (s->filtermode == FILTER_SWITCHABLE && !b->intra) {
            memset(&s->above_filter_ctx[col], b->filter, w4);
            memset(&s->left_filter_ctx[row7], b->filter, h4);
            b->filter = vp9_filter_lut[b->filter];
        }
        if (b->bs > BS_8x8) {
            int mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);

            AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][0], &b->mv[1][0]);
            AV_COPY32(&s->left_mv_ctx[row7 * 2 + 0][1], &b->mv[1][1]);
            AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][0], mv0);
            AV_WN32A(&s->left_mv_ctx[row7 * 2 + 1][1], mv1);
            AV_COPY32(&s->above_mv_ctx[col * 2 + 0][0], &b->mv[2][0]);
            AV_COPY32(&s->above_mv_ctx[col * 2 + 0][1], &b->mv[2][1]);
            AV_WN32A(&s->above_mv_ctx[col * 2 + 1][0], mv0);
            AV_WN32A(&s->above_mv_ctx[col * 2 + 1][1], mv1);
        } else {
            int n, mv0 = AV_RN32A(&b->mv[3][0]), mv1 = AV_RN32A(&b->mv[3][1]);

            for (n = 0; n < w4 * 2; n++) {
                AV_WN32A(&s->above_mv_ctx[col * 2 + n][0], mv0);
                AV_WN32A(&s->above_mv_ctx[col * 2 + n][1], mv1);
            }
            for (n = 0; n < h4 * 2; n++) {
                AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][0], mv0);
                AV_WN32A(&s->left_mv_ctx[row7 * 2 + n][1], mv1);
            }
        }
        if (!b->intra) { // FIXME write 0xff or -1 if intra, so we can use this
                         // as a direct check in above branches
            int vref = b->ref[b->comp ? s->signbias[s->varcompref[0]] : 0];

            memset(&s->above_ref_ctx[col], vref, w4);
            memset(&s->left_ref_ctx[row7], vref, h4);
        }
    }
    for (y = 0; y < h4; y++) {
        int x, o = (row + y) * s->sb_cols * 8 + col;

        if (b->intra) {
            for (x = 0; x < w4; x++) {
                s->mv[0][o + x].ref[0] =
                s->mv[0][o + x].ref[1] = -1;
            }
        } else if (b->comp) {
            for (x = 0; x < w4; x++) {
                s->mv[0][o + x].ref[0] = b->ref[0];
                s->mv[0][o + x].ref[1] = b->ref[1];
                AV_COPY32(&s->mv[0][o + x].mv[0], &b->mv[3][0]);
                AV_COPY32(&s->mv[0][o + x].mv[1], &b->mv[3][1]);
            }
        } else {
            for (x = 0; x < w4; x++) {
                s->mv[0][o + x].ref[0] = b->ref[0];
                s->mv[0][o + x].ref[1] = -1;
                AV_COPY32(&s->mv[0][o + x].mv[0], &b->mv[3][0]);
            }
        }
    }
}
// FIXME remove tx argument, and merge cnt/eob arguments?
static int decode_coeffs_b(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
                           enum TxfmMode tx, unsigned (*cnt)[6][3],
                           unsigned (*eob)[6][2], uint8_t (*p)[6][11],
                           int nnz, const int16_t *scan, const int16_t (*nb)[2],
                           const int16_t *band_counts, const int16_t *qmul)
{
    int i = 0, band = 0, band_left = band_counts[band];
    uint8_t *tp = p[0][nnz];
    uint8_t cache[1024];

    do {
        int val, rc;
        val = vp56_rac_get_prob_branchy(c, tp[0]); // eob
        eob[band][nnz][val]++;
        if (!val)
            break;

    skip_eob:
        if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero
            cnt[band][nnz][0]++;
            if (!--band_left)
                band_left = band_counts[++band];
            cache[scan[i]] = 0;
            nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
            tp = p[band][nnz];
            if (++i == n_coeffs)
                break; // invalid input; blocks should end with EOB
            goto skip_eob;
        }
        rc = scan[i];
        if (!vp56_rac_get_prob_branchy(c, tp[2])) { // one
            cnt[band][nnz][1]++;
            val = 1;
            cache[rc] = 1;
        } else {
            // fill in p[3-10] (model fill) - only once per frame for each pos
            if (!tp[3])
                memcpy(&tp[3], vp9_model_pareto8[tp[2]], 8);

            cnt[band][nnz][2]++;
            if (!vp56_rac_get_prob_branchy(c, tp[3])) { // 2, 3, 4
                if (!vp56_rac_get_prob_branchy(c, tp[4])) {
                    cache[rc] = val = 2;
                } else {
                    val = 3 + vp56_rac_get_prob(c, tp[5]);
                    cache[rc] = 3;
                }
            } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2
                cache[rc] = 4;
                if (!vp56_rac_get_prob_branchy(c, tp[7])) {
                    val = 5 + vp56_rac_get_prob(c, 159);
                } else {
                    val  = 7 + (vp56_rac_get_prob(c, 165) << 1);
                    val += vp56_rac_get_prob(c, 145);
                }
            } else { // cat 3-6
                cache[rc] = 5;
                if (!vp56_rac_get_prob_branchy(c, tp[8])) {
                    if (!vp56_rac_get_prob_branchy(c, tp[9])) {
                        val  = 11 + (vp56_rac_get_prob(c, 173) << 2);
                        val +=      (vp56_rac_get_prob(c, 148) << 1);
                        val +=       vp56_rac_get_prob(c, 140);
                    } else {
                        val  = 19 + (vp56_rac_get_prob(c, 176) << 3);
                        val +=      (vp56_rac_get_prob(c, 155) << 2);
                        val +=      (vp56_rac_get_prob(c, 140) << 1);
                        val +=       vp56_rac_get_prob(c, 135);
                    }
                } else if (!vp56_rac_get_prob_branchy(c, tp[10])) {
                    val  = 35 + (vp56_rac_get_prob(c, 180) << 4);
                    val +=      (vp56_rac_get_prob(c, 157) << 3);
                    val +=      (vp56_rac_get_prob(c, 141) << 2);
                    val +=      (vp56_rac_get_prob(c, 134) << 1);
                    val +=       vp56_rac_get_prob(c, 130);
                } else {
                    val  = 67 + (vp56_rac_get_prob(c, 254) << 13);
                    val +=      (vp56_rac_get_prob(c, 254) << 12);
                    val +=      (vp56_rac_get_prob(c, 254) << 11);
                    val +=      (vp56_rac_get_prob(c, 252) << 10);
                    val +=      (vp56_rac_get_prob(c, 249) << 9);
                    val +=      (vp56_rac_get_prob(c, 243) << 8);
                    val +=      (vp56_rac_get_prob(c, 230) << 7);
                    val +=      (vp56_rac_get_prob(c, 196) << 6);
                    val +=      (vp56_rac_get_prob(c, 177) << 5);
                    val +=      (vp56_rac_get_prob(c, 153) << 4);
                    val +=      (vp56_rac_get_prob(c, 140) << 3);
                    val +=      (vp56_rac_get_prob(c, 133) << 2);
                    val +=      (vp56_rac_get_prob(c, 130) << 1);
                    val +=       vp56_rac_get_prob(c, 129);
                }
            }
        }
        if (!--band_left)
            band_left = band_counts[++band];
        if (tx == TX_32X32) // FIXME slow
            coef[rc] = ((vp8_rac_get(c) ? -val : val) * qmul[!!i]) / 2;
        else
            coef[rc] = (vp8_rac_get(c) ? -val : val) * qmul[!!i];
        nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
        tp = p[band][nnz];
    } while (++i < n_coeffs);

    return i;
}
static int decode_coeffs(AVCodecContext *ctx)
{
    VP9Context *s = ctx->priv_data;
    VP9Block *const b = &s->b;
    int row = b->row, col = b->col;
    uint8_t (*p)[6][11] = s->prob.coef[b->tx][0 /* y */][!b->intra];
    unsigned (*c)[6][3] = s->counts.coef[b->tx][0 /* y */][!b->intra];
    unsigned (*e)[6][2] = s->counts.eob[b->tx][0 /* y */][!b->intra];
    int w4 = bwh_tab[1][b->bs][0] << 1, h4 = bwh_tab[1][b->bs][1] << 1;
    int end_x = FFMIN(2 * (s->cols - col), w4);
    int end_y = FFMIN(2 * (s->rows - row), h4);
    int n, pl, x, y, step1d = 1 << b->tx, step = 1 << (b->tx * 2);
    int uvstep1d = 1 << b->uvtx, uvstep = 1 << (b->uvtx * 2), res;
    int16_t (*qmul)[2] = s->segmentation.feat[b->seg_id].qmul;
    int tx = 4 * s->lossless + b->tx;
    const int16_t * const *yscans = vp9_scans[tx];
    const int16_t (* const *ynbs)[2] = vp9_scans_nb[tx];
    const int16_t *uvscan = vp9_scans[b->uvtx][DCT_DCT];
    const int16_t (*uvnb)[2] = vp9_scans_nb[b->uvtx][DCT_DCT];
    uint8_t *a = &s->above_y_nnz_ctx[col * 2];
    uint8_t *l = &s->left_y_nnz_ctx[(row & 7) << 1];
    static const int16_t band_counts[4][8] = {
        { 1, 2, 3, 4,  3,   16 - 13 },
        { 1, 2, 3, 4, 11,   64 - 21 },
        { 1, 2, 3, 4, 11,  256 - 21 },
        { 1, 2, 3, 4, 11, 1024 - 21 },
    };
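    // Each band_counts row sums to the coefficient count of its tx size
    // (e.g. 1+2+3+4+3+3 = 16 for 4x4 and 1+2+3+4+11+1003 = 1024 for 32x32);
    // band_left in decode_coeffs_b() counts down the coefficients remaining
    // in the current probability band.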
    const int16_t *y_band_counts  = band_counts[b->tx];
    const int16_t *uv_band_counts = band_counts[b->uvtx];

    /* y tokens */
    if (b->tx > TX_4X4) { // FIXME slow
        for (y = 0; y < end_y; y += step1d)
            for (x = 1; x < step1d; x++)
                l[y] |= l[y + x];
        for (x = 0; x < end_x; x += step1d)
            for (y = 1; y < step1d; y++)
                a[x] |= a[x + y];
    }
    for (n = 0, y = 0; y < end_y; y += step1d) {
        for (x = 0; x < end_x; x += step1d, n += step) {
            enum TxfmType txtp = vp9_intra_txfm_type[b->mode[b->tx == TX_4X4 &&
                                                             b->bs > BS_8x8 ?
                                                             n : 0]];
            int nnz = a[x] + l[y];
            if ((res = decode_coeffs_b(&s->c, s->block + 16 * n, 16 * step,
                                       b->tx, c, e, p, nnz, yscans[txtp],
                                       ynbs[txtp], y_band_counts, qmul[0])) < 0)
                return res;
            a[x] = l[y] = !!res;
            if (b->tx > TX_8X8) {
                AV_WN16A(&s->eob[n], res);
            } else {
                s->eob[n] = res;
            }
        }
    }
    if (b->tx > TX_4X4) { // FIXME slow
        for (y = 0; y < end_y; y += step1d)
            memset(&l[y + 1], l[y], FFMIN(end_y - y - 1, step1d - 1));
        for (x = 0; x < end_x; x += step1d)
            memset(&a[x + 1], a[x], FFMIN(end_x - x - 1, step1d - 1));
    }
    /* uv tokens */
    p = s->prob.coef[b->uvtx][1 /* uv */][!b->intra];
    c = s->counts.coef[b->uvtx][1 /* uv */][!b->intra];
    e = s->counts.eob[b->uvtx][1 /* uv */][!b->intra];
    w4    >>= 1;
    h4    >>= 1;
    end_x >>= 1;
    end_y >>= 1;
    for (pl = 0; pl < 2; pl++) {
        a = &s->above_uv_nnz_ctx[pl][col];
        l = &s->left_uv_nnz_ctx[pl][row & 7];
        if (b->uvtx > TX_4X4) { // FIXME slow
            for (y = 0; y < end_y; y += uvstep1d)
                for (x = 1; x < uvstep1d; x++)
                    l[y] |= l[y + x];
            for (x = 0; x < end_x; x += uvstep1d)
                for (y = 1; y < uvstep1d; y++)
                    a[x] |= a[x + y];
        }
        for (n = 0, y = 0; y < end_y; y += uvstep1d) {
            for (x = 0; x < end_x; x += uvstep1d, n += uvstep) {
                int nnz = a[x] + l[y];
                if ((res = decode_coeffs_b(&s->c, s->uvblock[pl] + 16 * n,
                                           16 * uvstep, b->uvtx, c, e, p, nnz,
                                           uvscan, uvnb, uv_band_counts,
                                           qmul[1])) < 0)
                    return res;
                a[x] = l[y] = !!res;
                if (b->uvtx > TX_8X8) {
                    AV_WN16A(&s->uveob[pl][n], res);
                } else {
                    s->uveob[pl][n] = res;
                }
            }
        }
        if (b->uvtx > TX_4X4) { // FIXME slow
            for (y = 0; y < end_y; y += uvstep1d)
                memset(&l[y + 1], l[y], FFMIN(end_y - y - 1, uvstep1d - 1));
            for (x = 0; x < end_x; x += uvstep1d)
                memset(&a[x + 1], a[x], FFMIN(end_x - x - 1, uvstep1d - 1));
        }
    }

    return 0;
}
2008 static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **a,
2009 uint8_t *dst_edge, ptrdiff_t stride_edge,
2010 uint8_t *dst_inner, ptrdiff_t stride_inner,
2011 uint8_t *l, int col, int x, int w,
2012 int row, int y, enum TxfmMode tx,
2015 int have_top = row > 0 || y > 0;
2016 int have_left = col > s->tiling.tile_col_start || x > 0;
2017 int have_right = x < w - 1;
2018 static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
2019 [VERT_PRED] = { { DC_127_PRED, VERT_PRED },
2020 { DC_127_PRED, VERT_PRED } },
2021 [HOR_PRED] = { { DC_129_PRED, DC_129_PRED },
2022 { HOR_PRED, HOR_PRED } },
2023 [DC_PRED] = { { DC_128_PRED, TOP_DC_PRED },
2024 { LEFT_DC_PRED, DC_PRED } },
2025 [DIAG_DOWN_LEFT_PRED] = { { DC_127_PRED, DIAG_DOWN_LEFT_PRED },
2026 { DC_127_PRED, DIAG_DOWN_LEFT_PRED } },
2027 [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
2028 { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
2029 [VERT_RIGHT_PRED] = { { VERT_RIGHT_PRED, VERT_RIGHT_PRED },
2030 { VERT_RIGHT_PRED, VERT_RIGHT_PRED } },
2031 [HOR_DOWN_PRED] = { { HOR_DOWN_PRED, HOR_DOWN_PRED },
2032 { HOR_DOWN_PRED, HOR_DOWN_PRED } },
2033 [VERT_LEFT_PRED] = { { DC_127_PRED, VERT_LEFT_PRED },
2034 { DC_127_PRED, VERT_LEFT_PRED } },
2035 [HOR_UP_PRED] = { { DC_129_PRED, DC_129_PRED },
2036 { HOR_UP_PRED, HOR_UP_PRED } },
2037 [TM_VP8_PRED] = { { DC_129_PRED, VERT_PRED },
2038 { HOR_PRED, TM_VP8_PRED } },
2040 static const struct {
2041 uint8_t needs_left:1;
2042 uint8_t needs_top:1;
2043 uint8_t needs_topleft:1;
2044 uint8_t needs_topright:1;
2045 } edges[N_INTRA_PRED_MODES] = {
2046 [VERT_PRED] = { .needs_top = 1 },
2047 [HOR_PRED] = { .needs_left = 1 },
2048 [DC_PRED] = { .needs_top = 1, .needs_left = 1 },
2049 [DIAG_DOWN_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
2050 [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
2051 [VERT_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
2052 [HOR_DOWN_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
2053 [VERT_LEFT_PRED] = { .needs_top = 1, .needs_topright = 1 },
2054 [HOR_UP_PRED] = { .needs_left = 1 },
2055 [TM_VP8_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
2056 [LEFT_DC_PRED] = { .needs_left = 1 },
2057 [TOP_DC_PRED] = { .needs_top = 1 },
2058 [DC_128_PRED] = { 0 },
2059 [DC_127_PRED] = { 0 },
2060 [DC_129_PRED] = { 0 }
2063 av_assert2(mode >= 0 && mode < 10);
2064 mode = mode_conv[mode][have_left][have_top];
2065 if (edges[mode].needs_top) {
2066 uint8_t *top, *topleft;
2067 int n_px_need = 4 << tx, n_px_have = (((s->cols - col) << !p) - x) * 4;
2068 int n_px_need_tr = 0;
2070 if (tx == TX_4X4 && edges[mode].needs_topright && have_right)
2073 // if top of sb64-row, use s->intra_pred_data[] instead of
2074 // dst[-stride] for intra prediction (it contains pre- instead of
2075 // post-loopfilter data)
2077 top = !(row & 7) && !y ?
2078 s->intra_pred_data[p] + col * (8 >> !!p) + x * 4 :
2079 y == 0 ? &dst_edge[-stride_edge] : &dst_inner[-stride_inner];
2081 topleft = !(row & 7) && !y ?
2082 s->intra_pred_data[p] + col * (8 >> !!p) + x * 4 :
2083 y == 0 || x == 0 ? &dst_edge[-stride_edge] :
2084 &dst_inner[-stride_inner];
2088 (!edges[mode].needs_topleft || (have_left && top == topleft)) &&
2089 (tx != TX_4X4 || !edges[mode].needs_topright || have_right) &&
2090 n_px_need + n_px_need_tr <= n_px_have) {
2094 if (n_px_need <= n_px_have) {
2095 memcpy(*a, top, n_px_need);
2097 memcpy(*a, top, n_px_have);
2098 memset(&(*a)[n_px_have], (*a)[n_px_have - 1],
2099 n_px_need - n_px_have);
2102 memset(*a, 127, n_px_need);
2104 if (edges[mode].needs_topleft) {
2105 if (have_left && have_top) {
2106 (*a)[-1] = topleft[-1];
2108 (*a)[-1] = have_top ? 129 : 127;
2111 if (tx == TX_4X4 && edges[mode].needs_topright) {
2112 if (have_top && have_right &&
2113 n_px_need + n_px_need_tr <= n_px_have) {
2114 memcpy(&(*a)[4], &top[4], 4);
2116 memset(&(*a)[4], (*a)[3], 4);
2121 if (edges[mode].needs_left) {
2123 int n_px_need = 4 << tx, i, n_px_have = (((s->rows - row) << !p) - y) * 4;
2124 uint8_t *dst = x == 0 ? dst_edge : dst_inner;
2125 ptrdiff_t stride = x == 0 ? stride_edge : stride_inner;
2127 if (n_px_need <= n_px_have) {
2128 for (i = 0; i < n_px_need; i++)
2129 l[i] = dst[i * stride - 1];
2131 for (i = 0; i < n_px_have; i++)
2132 l[i] = dst[i * stride - 1];
2133 memset(&l[i], l[i - 1], n_px_need - n_px_have);
2136 memset(l, 129, 4 << tx);
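/* Illustrative note: when a prediction edge is unavailable,
 * check_intra_mode() substitutes constants instead of reading out of
 * bounds: a missing top row is filled with 127 and a missing left column
 * with 129, matching the memsets above (DC_128_PRED covers the
 * both-missing case). Hypothetical sketch: */
static av_unused void example_fill_missing_edges(uint8_t *top, uint8_t *left,
                                                 int n, int have_top,
                                                 int have_left)
{
    if (!have_top)
        memset(top, 127, n);  /* same constant as the top-edge memset */
    if (!have_left)
        memset(left, 129, n); /* same constant as the left-edge memset */
}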
2143 static void intra_recon(AVCodecContext *ctx, ptrdiff_t y_off, ptrdiff_t uv_off)
2145 VP9Context *s = ctx->priv_data;
2146 VP9Block *const b = &s->b;
2147 int row = b->row, col = b->col;
2148 int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
2149 int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
2150 int end_x = FFMIN(2 * (s->cols - col), w4);
2151 int end_y = FFMIN(2 * (s->rows - row), h4);
2152 int tx = 4 * s->lossless + b->tx, uvtx = b->uvtx + 4 * s->lossless;
2153 int uvstep1d = 1 << b->uvtx, p;
2154 uint8_t *dst = b->dst[0], *dst_r = s->f->data[0] + y_off;
2156 for (n = 0, y = 0; y < end_y; y += step1d) {
2157 uint8_t *ptr = dst, *ptr_r = dst_r;
2158 for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d,
2159 ptr_r += 4 * step1d, n += step) {
2160 int mode = b->mode[b->bs > BS_8x8 && b->tx == TX_4X4 ?
2162 LOCAL_ALIGNED_16(uint8_t, a_buf, [48]);
2163 uint8_t *a = &a_buf[16], l[32];
2164 enum TxfmType txtp = vp9_intra_txfm_type[mode];
2165 int eob = b->skip ? 0 : b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
2167 mode = check_intra_mode(s, mode, &a, ptr_r, s->f->linesize[0],
2168 ptr, b->y_stride, l,
2169 col, x, w4, row, y, b->tx, 0);
2170 s->dsp.intra_pred[b->tx][mode](ptr, b->y_stride, l, a);
2172 s->dsp.itxfm_add[tx][txtp](ptr, b->y_stride,
2173 s->block + 16 * n, eob);
2175 dst_r += 4 * s->f->linesize[0] * step1d;
2176 dst += 4 * b->y_stride * step1d;
2184 step = 1 << (b->uvtx * 2);
2185 for (p = 0; p < 2; p++) {
2186 dst = b->dst[1 + p];
2187 dst_r = s->f->data[1 + p] + uv_off;
2188 for (n = 0, y = 0; y < end_y; y += uvstep1d) {
2189 uint8_t *ptr = dst, *ptr_r = dst_r;
2190 for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d,
2191 ptr_r += 4 * uvstep1d, n += step) {
2192 int mode = b->uvmode;
2193 LOCAL_ALIGNED_16(uint8_t, a_buf, [48]);
2194 uint8_t *a = &a_buf[16], l[32];
2195 int eob = b->skip ? 0 : b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
2197 mode = check_intra_mode(s, mode, &a, ptr_r, s->f->linesize[1],
2198 ptr, b->uv_stride, l,
2199 col, x, w4, row, y, b->uvtx, p + 1);
2200 s->dsp.intra_pred[b->uvtx][mode](ptr, b->uv_stride, l, a);
2202 s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, b->uv_stride,
2203 s->uvblock[p] + 16 * n, eob);
2205 dst_r += 4 * uvstep1d * s->f->linesize[1];
2206 dst += 4 * uvstep1d * b->uv_stride;
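/* Illustrative note: for 16x16 and 32x32 transforms the end-of-block
 * position can exceed 255, so it is stored and re-read as an aligned
 * 16-bit value (the AV_WN16A/AV_RN16A pairs above), while 4x4 and 8x8
 * blocks fit in a single byte. Hedged sketch of the load (helper name
 * hypothetical): */
static av_unused int example_load_eob(const uint8_t *eob, int n,
                                      enum TxfmMode tx)
{
    return tx > TX_8X8 ? AV_RN16A(&eob[n]) : eob[n];
}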
2211 static av_always_inline void mc_luma_dir(VP9Context *s, vp9_mc_func (*mc)[2],
2212 uint8_t *dst, ptrdiff_t dst_stride,
2213 const uint8_t *ref, ptrdiff_t ref_stride,
2214 ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
2215 int bw, int bh, int w, int h)
2217 int mx = mv->x, my = mv->y;
2221 ref += y * ref_stride + x;
2224 // FIXME bilinear filter only needs 0/1 pixels, not 3/4
2225 if (x < !!mx * 3 || y < !!my * 3 ||
2226 x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) {
2227 s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
2228 ref - !!my * 3 * ref_stride - !!mx * 3,
2230 bw + !!mx * 7, bh + !!my * 7,
2231 x - !!mx * 3, y - !!my * 3, w, h);
2232 ref = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
2235 mc[!!mx][!!my](dst, dst_stride, ref, ref_stride, bh, mx << 1, my << 1);
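/* Illustrative note: the 8-tap subpel filters read 3 reference pixels
 * before and 4 after the block in each interpolated direction, which is
 * where the "!!mx * 3"/"!!mx * 4" bounds checks and the "+ 7" patch
 * enlargement above come from. Hypothetical sketch for one axis: */
static av_unused int example_ref_patch_extent(int block_size, int subpel)
{
    /* 3 taps on the leading side + 4 on the trailing side when filtering */
    return block_size + (subpel ? 3 + 4 : 0);
}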
2238 static av_always_inline void mc_chroma_dir(VP9Context *s, vp9_mc_func (*mc)[2],
2239 uint8_t *dst_u, uint8_t *dst_v,
2240 ptrdiff_t dst_stride,
2241 const uint8_t *ref_u, ptrdiff_t src_stride_u,
2242 const uint8_t *ref_v, ptrdiff_t src_stride_v,
2243 ptrdiff_t y, ptrdiff_t x, const VP56mv *mv,
2244 int bw, int bh, int w, int h)
2246 int mx = mv->x, my = mv->y;
2250 ref_u += y * src_stride_u + x;
2251 ref_v += y * src_stride_v + x;
2254 // FIXME bilinear filter only needs 0/1 pixels, not 3/4
2255 if (x < !!mx * 3 || y < !!my * 3 ||
2256 x + !!mx * 4 > w - bw || y + !!my * 4 > h - bh) {
2257 s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
2258 ref_u - !!my * 3 * src_stride_u - !!mx * 3,
2260 bw + !!mx * 7, bh + !!my * 7,
2261 x - !!mx * 3, y - !!my * 3, w, h);
2262 ref_u = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
2263 mc[!!mx][!!my](dst_u, dst_stride, ref_u, 80, bh, mx, my);
2265 s->vdsp.emulated_edge_mc(s->edge_emu_buffer,
2266 ref_v - !!my * 3 * src_stride_v - !!mx * 3,
2268 bw + !!mx * 7, bh + !!my * 7,
2269 x - !!mx * 3, y - !!my * 3, w, h);
2270 ref_v = s->edge_emu_buffer + !!my * 3 * 80 + !!mx * 3;
2271 mc[!!mx][!!my](dst_v, dst_stride, ref_v, 80, bh, mx, my);
2273 mc[!!mx][!!my](dst_u, dst_stride, ref_u, src_stride_u, bh, mx, my);
2274 mc[!!mx][!!my](dst_v, dst_stride, ref_v, src_stride_v, bh, mx, my);
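/* Illustrative note: motion vectors are coded in 1/8-luma-pel units, so
 * on the half-resolution chroma planes the same vector has 1/16-pel
 * precision; that is why mc_luma_dir() doubles the filter phase
 * ("mx << 1, my << 1") while mc_chroma_dir() passes mx/my through
 * unscaled. Hedged sketch under that convention (helper hypothetical): */
static av_unused void example_subpel_phases(int mv_x, int *luma_phase,
                                            int *chroma_phase)
{
    *luma_phase   = (mv_x & 7) << 1; /* 1/8-pel fraction -> 1/16-pel phase */
    *chroma_phase =  mv_x & 15;      /* already 1/16-pel when subsampled */
}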
2278 static void inter_recon(AVCodecContext *ctx)
2280 static const uint8_t bwlog_tab[2][N_BS_SIZES] = {
2281 { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4 },
2282 { 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4 },
2284 VP9Context *s = ctx->priv_data;
2285 VP9Block *const b = &s->b;
2286 int row = b->row, col = b->col;
2287 AVFrame *ref1 = s->refs[s->refidx[b->ref[0]]];
2288 AVFrame *ref2 = b->comp ? s->refs[s->refidx[b->ref[1]]] : NULL;
2289 int w = ctx->width, h = ctx->height;
2290 ptrdiff_t ls_y = b->y_stride, ls_uv = b->uv_stride;
2293 if (b->bs > BS_8x8) {
2294 if (b->bs == BS_8x4) {
2295 mc_luma_dir(s, s->dsp.mc[3][b->filter][0], b->dst[0], ls_y,
2296 ref1->data[0], ref1->linesize[0],
2297 row << 3, col << 3, &b->mv[0][0], 8, 4, w, h);
2298 mc_luma_dir(s, s->dsp.mc[3][b->filter][0],
2299 b->dst[0] + 4 * ls_y, ls_y,
2300 ref1->data[0], ref1->linesize[0],
2301 (row << 3) + 4, col << 3, &b->mv[2][0], 8, 4, w, h);
2304 mc_luma_dir(s, s->dsp.mc[3][b->filter][1], b->dst[0], ls_y,
2305 ref2->data[0], ref2->linesize[0],
2306 row << 3, col << 3, &b->mv[0][1], 8, 4, w, h);
2307 mc_luma_dir(s, s->dsp.mc[3][b->filter][1],
2308 b->dst[0] + 4 * ls_y, ls_y,
2309 ref2->data[0], ref2->linesize[0],
2310 (row << 3) + 4, col << 3, &b->mv[2][1], 8, 4, w, h);
2312 } else if (b->bs == BS_4x8) {
2313 mc_luma_dir(s, s->dsp.mc[4][b->filter][0], b->dst[0], ls_y,
2314 ref1->data[0], ref1->linesize[0],
2315 row << 3, col << 3, &b->mv[0][0], 4, 8, w, h);
2316 mc_luma_dir(s, s->dsp.mc[4][b->filter][0], b->dst[0] + 4, ls_y,
2317 ref1->data[0], ref1->linesize[0],
2318 row << 3, (col << 3) + 4, &b->mv[1][0], 4, 8, w, h);
2321 mc_luma_dir(s, s->dsp.mc[4][b->filter][1], b->dst[0], ls_y,
2322 ref2->data[0], ref2->linesize[0],
2323 row << 3, col << 3, &b->mv[0][1], 4, 8, w, h);
2324 mc_luma_dir(s, s->dsp.mc[4][b->filter][1], b->dst[0] + 4, ls_y,
2325 ref2->data[0], ref2->linesize[0],
2326 row << 3, (col << 3) + 4, &b->mv[1][1], 4, 8, w, h);
2329 av_assert2(b->bs == BS_4x4);
2331 // FIXME if two horizontally adjacent blocks have the same MV,
2332 // do a w8 instead of a w4 call
2333 mc_luma_dir(s, s->dsp.mc[4][b->filter][0], b->dst[0], ls_y,
2334 ref1->data[0], ref1->linesize[0],
2335 row << 3, col << 3, &b->mv[0][0], 4, 4, w, h);
2336 mc_luma_dir(s, s->dsp.mc[4][b->filter][0], b->dst[0] + 4, ls_y,
2337 ref1->data[0], ref1->linesize[0],
2338 row << 3, (col << 3) + 4, &b->mv[1][0], 4, 4, w, h);
2339 mc_luma_dir(s, s->dsp.mc[4][b->filter][0],
2340 b->dst[0] + 4 * ls_y, ls_y,
2341 ref1->data[0], ref1->linesize[0],
2342 (row << 3) + 4, col << 3, &b->mv[2][0], 4, 4, w, h);
2343 mc_luma_dir(s, s->dsp.mc[4][b->filter][0],
2344 b->dst[0] + 4 * ls_y + 4, ls_y,
2345 ref1->data[0], ref1->linesize[0],
2346 (row << 3) + 4, (col << 3) + 4, &b->mv[3][0], 4, 4, w, h);
2349 mc_luma_dir(s, s->dsp.mc[4][b->filter][1], b->dst[0], ls_y,
2350 ref2->data[0], ref2->linesize[0],
2351 row << 3, col << 3, &b->mv[0][1], 4, 4, w, h);
2352 mc_luma_dir(s, s->dsp.mc[4][b->filter][1], b->dst[0] + 4, ls_y,
2353 ref2->data[0], ref2->linesize[0],
2354 row << 3, (col << 3) + 4, &b->mv[1][1], 4, 4, w, h);
2355 mc_luma_dir(s, s->dsp.mc[4][b->filter][1],
2356 b->dst[0] + 4 * ls_y, ls_y,
2357 ref2->data[0], ref2->linesize[0],
2358 (row << 3) + 4, col << 3, &b->mv[2][1], 4, 4, w, h);
2359 mc_luma_dir(s, s->dsp.mc[4][b->filter][1],
2360 b->dst[0] + 4 * ls_y + 4, ls_y,
2361 ref2->data[0], ref2->linesize[0],
2362 (row << 3) + 4, (col << 3) + 4, &b->mv[3][1], 4, 4, w, h);
2366 int bwl = bwlog_tab[0][b->bs];
2367 int bw = bwh_tab[0][b->bs][0] * 4, bh = bwh_tab[0][b->bs][1] * 4;
2369 mc_luma_dir(s, s->dsp.mc[bwl][b->filter][0], b->dst[0], ls_y,
2370 ref1->data[0], ref1->linesize[0],
2371 row << 3, col << 3, &b->mv[0][0], bw, bh, w, h);
2374 mc_luma_dir(s, s->dsp.mc[bwl][b->filter][1], b->dst[0], ls_y,
2375 ref2->data[0], ref2->linesize[0],
2376 row << 3, col << 3, &b->mv[0][1], bw, bh, w, h);
2381 int bwl = bwlog_tab[1][b->bs];
2382 int bw = bwh_tab[1][b->bs][0] * 4, bh = bwh_tab[1][b->bs][1] * 4;
2387 if (b->bs > BS_8x8) {
2388 mvuv.x = ROUNDED_DIV(b->mv[0][0].x + b->mv[1][0].x + b->mv[2][0].x + b->mv[3][0].x, 4);
2389 mvuv.y = ROUNDED_DIV(b->mv[0][0].y + b->mv[1][0].y + b->mv[2][0].y + b->mv[3][0].y, 4);
2394 mc_chroma_dir(s, s->dsp.mc[bwl][b->filter][0],
2395 b->dst[1], b->dst[2], ls_uv,
2396 ref1->data[1], ref1->linesize[1],
2397 ref1->data[2], ref1->linesize[2],
2398 row << 2, col << 2, &mvuv, bw, bh, w, h);
2401 if (b->bs > BS_8x8) {
2402 mvuv.x = ROUNDED_DIV(b->mv[0][1].x + b->mv[1][1].x + b->mv[2][1].x + b->mv[3][1].x, 4);
2403 mvuv.y = ROUNDED_DIV(b->mv[0][1].y + b->mv[1][1].y + b->mv[2][1].y + b->mv[3][1].y, 4);
2407 mc_chroma_dir(s, s->dsp.mc[bwl][b->filter][1],
2408 b->dst[1], b->dst[2], ls_uv,
2409 ref2->data[1], ref2->linesize[1],
2410 ref2->data[2], ref2->linesize[2],
2411 row << 2, col << 2, &mvuv, bw, bh, w, h);
2416 /* mostly copied from intra_recon() */
2418 int w4 = bwh_tab[1][b->bs][0] << 1, step1d = 1 << b->tx, n;
2419 int h4 = bwh_tab[1][b->bs][1] << 1, x, y, step = 1 << (b->tx * 2);
2420 int end_x = FFMIN(2 * (s->cols - col), w4);
2421 int end_y = FFMIN(2 * (s->rows - row), h4);
2422 int tx = 4 * s->lossless + b->tx, uvtx = b->uvtx + 4 * s->lossless;
2423 int uvstep1d = 1 << b->uvtx, p;
2424 uint8_t *dst = b->dst[0];
2427 for (n = 0, y = 0; y < end_y; y += step1d) {
2429 for (x = 0; x < end_x; x += step1d, ptr += 4 * step1d, n += step) {
2430 int eob = b->tx > TX_8X8 ? AV_RN16A(&s->eob[n]) : s->eob[n];
2433 s->dsp.itxfm_add[tx][DCT_DCT](ptr, b->y_stride,
2434 s->block + 16 * n, eob);
2436 dst += 4 * b->y_stride * step1d;
2444 step = 1 << (b->uvtx * 2);
2445 for (p = 0; p < 2; p++) {
2446 dst = b->dst[p + 1];
2447 for (n = 0, y = 0; y < end_y; y += uvstep1d) {
2449 for (x = 0; x < end_x; x += uvstep1d, ptr += 4 * uvstep1d, n += step) {
2450 int eob = b->uvtx > TX_8X8 ? AV_RN16A(&s->uveob[p][n]) : s->uveob[p][n];
2453 s->dsp.itxfm_add[uvtx][DCT_DCT](ptr, b->uv_stride,
2454 s->uvblock[p] + 16 * n, eob);
2456 dst += 4 * uvstep1d * b->uv_stride;
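/* Illustrative note: for sub-8x8 partitions one chroma block covers all
 * four luma sub-blocks, so the chroma MV is the rounded average of the
 * four luma MVs (the ROUNDED_DIV calls above). Hypothetical sketch of
 * one component: */
static av_unused int example_uv_mv_component(int v0, int v1, int v2, int v3)
{
    int sum = v0 + v1 + v2 + v3;
    /* round-to-nearest signed division by 4, as ROUNDED_DIV(sum, 4) */
    return (sum + (sum >= 0 ? 2 : -2)) / 4;
}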
2462 static av_always_inline void mask_edges(struct VP9Filter *lflvl, int is_uv,
2463 int row_and_7, int col_and_7,
2464 int w, int h, int col_end, int row_end,
2465 enum TxfmMode tx, int skip_inter)
2467 // FIXME I'm pretty sure all loops can be replaced by a single LUT if
2468 // we make VP9Filter.mask uint64_t (i.e. row/col all single variable)
2469 // and make the LUT 5-indexed (bl, bp, is_uv, tx and row/col), and then
2470 // use row_and_7/col_and_7 as shifts (1*col_and_7+8*row_and_7)
2472 // the intended behaviour of the vp9 loopfilter is to work on 8-pixel
2473 // edges. This means that for UV, we work on two subsampled blocks at
2474 // a time, and we only use the topleft block's mode information to set
2475 // things like block strength. Thus, for any block size smaller than
2476 // 16x16, ignore the odd portion of the block.
2477 if (tx == TX_4X4 && is_uv) {
2492 if (tx == TX_4X4 && !skip_inter) {
2493 int t = 1 << col_and_7, m_col = (t << w) - t, y;
2494 int m_col_odd = (t << (w - 1)) - t;
2496 // on 32-px edges, use the 8-px wide loopfilter; else, use 4-px wide
2498 int m_row_8 = m_col & 0x01, m_row_4 = m_col - m_row_8;
2500 for (y = row_and_7; y < h + row_and_7; y++) {
2501 int col_mask_id = 2 - !(y & 7);
2503 lflvl->mask[is_uv][0][y][1] |= m_row_8;
2504 lflvl->mask[is_uv][0][y][2] |= m_row_4;
2505 // for odd rows, if the odd column is not being filtered,
2506 // skip the odd row as well:
2513 // if a/c are even row/col and b/d are odd, and d is skipped,
2514 // e.g. right edge of size-66x66.webm, then skip b also (bug)
2515 if ((col_end & 1) && (y & 1)) {
2516 lflvl->mask[is_uv][1][y][col_mask_id] |= m_col_odd;
2518 lflvl->mask[is_uv][1][y][col_mask_id] |= m_col;
2522 int m_row_8 = m_col & 0x11, m_row_4 = m_col - m_row_8;
2524 for (y = row_and_7; y < h + row_and_7; y++) {
2525 int col_mask_id = 2 - !(y & 3);
2527 lflvl->mask[is_uv][0][y][1] |= m_row_8; // row edge
2528 lflvl->mask[is_uv][0][y][2] |= m_row_4;
2529 lflvl->mask[is_uv][1][y][col_mask_id] |= m_col; // col edge
2530 lflvl->mask[is_uv][0][y][3] |= m_col;
2531 lflvl->mask[is_uv][1][y][3] |= m_col;
2535 int y, t = 1 << col_and_7, m_col = (t << w) - t;
2538 int mask_id = (tx == TX_8X8);
2539 int l2 = tx + is_uv - 1, step1d = 1 << l2;
2540 static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
2541 int m_row = m_col & masks[l2];
2543 // at odd UV tx16/tx32 col/row loopfilter edges, force the 8px-wide
2544 // loopfilter so we do not filter past the visible edge.
2545 if (is_uv && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
2546 int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
2547 int m_row_8 = m_row - m_row_16;
2549 for (y = row_and_7; y < h + row_and_7; y++) {
2550 lflvl->mask[is_uv][0][y][0] |= m_row_16;
2551 lflvl->mask[is_uv][0][y][1] |= m_row_8;
2554 for (y = row_and_7; y < h + row_and_7; y++)
2555 lflvl->mask[is_uv][0][y][mask_id] |= m_row;
2558 if (is_uv && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
2559 for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
2560 lflvl->mask[is_uv][1][y][0] |= m_col;
2561 if (y - row_and_7 == h - 1)
2562 lflvl->mask[is_uv][1][y][1] |= m_col;
2564 for (y = row_and_7; y < h + row_and_7; y += step1d)
2565 lflvl->mask[is_uv][1][y][mask_id] |= m_col;
2567 } else if (tx != TX_4X4) {
2570 mask_id = (tx == TX_8X8) || (is_uv && h == 1);
2571 lflvl->mask[is_uv][1][row_and_7][mask_id] |= m_col;
2572 mask_id = (tx == TX_8X8) || (is_uv && w == 1);
2573 for (y = row_and_7; y < h + row_and_7; y++)
2574 lflvl->mask[is_uv][0][y][mask_id] |= t;
2576 int t8 = t & 0x01, t4 = t - t8;
2578 for (y = row_and_7; y < h + row_and_7; y++) {
2579 lflvl->mask[is_uv][0][y][2] |= t4;
2580 lflvl->mask[is_uv][0][y][1] |= t8;
2582 lflvl->mask[is_uv][1][row_and_7][2 - !(row_and_7 & 7)] |= m_col;
2584 int t8 = t & 0x11, t4 = t - t8;
2586 for (y = row_and_7; y < h + row_and_7; y++) {
2587 lflvl->mask[is_uv][0][y][2] |= t4;
2588 lflvl->mask[is_uv][0][y][1] |= t8;
2590 lflvl->mask[is_uv][1][row_and_7][2 - !(row_and_7 & 3)] |= m_col;
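/* Illustrative note: each mask[plane][dir][y] entry is a bitmap of 8px
 * columns whose edges need filtering at a given filter width; marking a
 * block spanning w columns starting at col_and_7 is the "(t << w) - t"
 * trick used throughout mask_edges(). Hypothetical sketch: */
static av_unused unsigned example_col_mask(int col_and_7, int w)
{
    unsigned t = 1u << col_and_7;
    return (t << w) - t; /* w consecutive bits set, starting at col_and_7 */
}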
2595 static int decode_b(AVCodecContext *ctx, int row, int col,
2596 struct VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
2597 enum BlockLevel bl, enum BlockPartition bp)
2599 VP9Context *s = ctx->priv_data;
2600 VP9Block *const b = &s->b;
2601 enum BlockSize bs = bl * 3 + bp;
2602 int res, y, w4 = bwh_tab[1][bs][0], h4 = bwh_tab[1][bs][1], lvl;
2609 s->min_mv.x = -(128 + col * 64);
2610 s->min_mv.y = -(128 + row * 64);
2611 s->max_mv.x = 128 + (s->cols - col - w4) * 64;
2612 s->max_mv.y = 128 + (s->rows - row - h4) * 64;
2615 b->uvtx = b->tx - (w4 * 2 == (1 << b->tx) || h4 * 2 == (1 << b->tx));
2618 if ((res = decode_coeffs(ctx)) < 0)
2623 memset(&s->above_y_nnz_ctx[col * 2], 0, w4 * 2);
2624 memset(&s->left_y_nnz_ctx[(row & 7) << 1], 0, h4 * 2);
2625 for (pl = 0; pl < 2; pl++) {
2626 memset(&s->above_uv_nnz_ctx[pl][col], 0, w4);
2627 memset(&s->left_uv_nnz_ctx[pl][row & 7], 0, h4);
2631 // use emulated overhangs if the stride of the target buffer can't hold
2632 // them; this keeps emu-edge support working even with large block
2634 emu[0] = (col + w4) * 8 > s->f->linesize[0] ||
2635 (row + h4) > s->rows + 2 * !(ctx->flags & CODEC_FLAG_EMU_EDGE);
2636 emu[1] = (col + w4) * 4 > s->f->linesize[1] ||
2637 (row + h4) > s->rows + 2 * !(ctx->flags & CODEC_FLAG_EMU_EDGE);
2639 b->dst[0] = s->tmp_y;
2642 b->dst[0] = s->f->data[0] + yoff;
2643 b->y_stride = s->f->linesize[0];
2646 b->dst[1] = s->tmp_uv[0];
2647 b->dst[2] = s->tmp_uv[1];
2650 b->dst[1] = s->f->data[1] + uvoff;
2651 b->dst[2] = s->f->data[2] + uvoff;
2652 b->uv_stride = s->f->linesize[1];
2655 intra_recon(ctx, yoff, uvoff);
2660 int w = FFMIN(s->cols - col, w4) * 8, h = FFMIN(s->rows - row, h4) * 8, n, o = 0;
2662 for (n = 0; o < w; n++) {
2667 s->dsp.mc[n][0][0][0][0](s->f->data[0] + yoff + o, s->f->linesize[0],
2668 s->tmp_y + o, 64, h, 0, 0);
2674 int w = FFMIN(s->cols - col, w4) * 4, h = FFMIN(s->rows - row, h4) * 4, n, o = 0;
2676 for (n = 1; o < w; n++) {
2681 s->dsp.mc[n][0][0][0][0](s->f->data[1] + uvoff + o, s->f->linesize[1],
2682 s->tmp_uv[0] + o, 32, h, 0, 0);
2683 s->dsp.mc[n][0][0][0][0](s->f->data[2] + uvoff + o, s->f->linesize[2],
2684 s->tmp_uv[1] + o, 32, h, 0, 0);
2690 // pick filter level and find edges to apply filter to
2691 if (s->filter.level &&
2692 (lvl = s->segmentation.feat[b->seg_id].lflvl[b->intra ? 0 : b->ref[0] + 1]
2693 [b->mode[3] != ZEROMV]) > 0) {
2694 int x_end = FFMIN(s->cols - col, w4), y_end = FFMIN(s->rows - row, h4);
2695 int skip_inter = !b->intra && b->skip;
2697 for (y = 0; y < h4; y++)
2698 memset(&lflvl->level[((row & 7) + y) * 8 + (col & 7)], lvl, w4);
2699 mask_edges(lflvl, 0, row & 7, col & 7, x_end, y_end, 0, 0, b->tx, skip_inter);
2700 mask_edges(lflvl, 1, row & 7, col & 7, x_end, y_end,
2701 s->cols & 1 && col + w4 >= s->cols ? s->cols & 7 : 0,
2702 s->rows & 1 && row + h4 >= s->rows ? s->rows & 7 : 0,
2703 b->uvtx, skip_inter);
2705 if (!s->filter.lim_lut[lvl]) {
2706 int sharp = s->filter.sharpness;
2710 limit >>= (sharp + 3) >> 2;
2711 limit = FFMIN(limit, 9 - sharp);
2713 limit = FFMAX(limit, 1);
2715 s->filter.lim_lut[lvl] = limit;
2716 s->filter.mblim_lut[lvl] = 2 * (lvl + 2) + limit;
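/* Illustrative sketch of the lazily filled limit LUTs above (standalone,
 * hypothetical version; assumes the usual "if (sharp > 0)" guard around
 * the two softening steps and sharpness in [0, 7]): */
static av_unused void example_fill_lf_limits(int lvl, int sharp,
                                             uint8_t *lim, uint8_t *mblim)
{
    int limit = lvl;

    if (sharp > 0) {
        limit >>= (sharp + 3) >> 2;      /* soften as sharpness rises */
        limit   = FFMIN(limit, 9 - sharp);
    }
    limit  = FFMAX(limit, 1);            /* never filter with a zero limit */
    *lim   = limit;
    *mblim = 2 * (lvl + 2) + limit;
}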
2723 static int decode_sb(AVCodecContext *ctx, int row, int col, struct VP9Filter *lflvl,
2724 ptrdiff_t yoff, ptrdiff_t uvoff, enum BlockLevel bl)
2726 VP9Context *s = ctx->priv_data;
2727 int c = ((s->above_partition_ctx[col] >> (3 - bl)) & 1) |
2728 (((s->left_partition_ctx[row & 0x7] >> (3 - bl)) & 1) << 1), res;
2729 const uint8_t *p = s->keyframe ? vp9_default_kf_partition_probs[bl][c] :
2730 s->prob.p.partition[bl][c];
2731 enum BlockPartition bp;
2732 ptrdiff_t hbs = 4 >> bl;
2735 bp = vp8_rac_get_tree(&s->c, vp9_partition_tree, p);
2736 res = decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
2737 } else if (col + hbs < s->cols) {
2738 if (row + hbs < s->rows) {
2739 bp = vp8_rac_get_tree(&s->c, vp9_partition_tree, p);
2741 case PARTITION_NONE:
2742 res = decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
2745 if (!(res = decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp))) {
2746 yoff += hbs * 8 * s->f->linesize[0];
2747 uvoff += hbs * 4 * s->f->linesize[1];
2748 res = decode_b(ctx, row + hbs, col, lflvl, yoff, uvoff, bl, bp);
2752 if (!(res = decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp))) {
2755 res = decode_b(ctx, row, col + hbs, lflvl, yoff, uvoff, bl, bp);
2758 case PARTITION_SPLIT:
2759 if (!(res = decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1))) {
2760 if (!(res = decode_sb(ctx, row, col + hbs, lflvl,
2761 yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1))) {
2762 yoff += hbs * 8 * s->f->linesize[0];
2763 uvoff += hbs * 4 * s->f->linesize[1];
2764 if (!(res = decode_sb(ctx, row + hbs, col, lflvl,
2765 yoff, uvoff, bl + 1)))
2766 res = decode_sb(ctx, row + hbs, col + hbs, lflvl,
2767 yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
2774 } else if (vp56_rac_get_prob_branchy(&s->c, p[1])) {
2775 bp = PARTITION_SPLIT;
2776 if (!(res = decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1)))
2777 res = decode_sb(ctx, row, col + hbs, lflvl,
2778 yoff + 8 * hbs, uvoff + 4 * hbs, bl + 1);
2781 res = decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
2783 } else if (row + hbs < s->rows) {
2784 if (vp56_rac_get_prob_branchy(&s->c, p[2])) {
2785 bp = PARTITION_SPLIT;
2786 if (!(res = decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1))) {
2787 yoff += hbs * 8 * s->f->linesize[0];
2788 uvoff += hbs * 4 * s->f->linesize[1];
2789 res = decode_sb(ctx, row + hbs, col, lflvl,
2790 yoff, uvoff, bl + 1);
2794 res = decode_b(ctx, row, col, lflvl, yoff, uvoff, bl, bp);
2797 bp = PARTITION_SPLIT;
2798 res = decode_sb(ctx, row, col, lflvl, yoff, uvoff, bl + 1);
2800 s->counts.partition[bl][c][bp]++;
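/* Illustrative note: bl is the block level (0 = 64x64 superblock, 3 =
 * 8x8), hbs = 4 >> bl is the half-block step in 8px block units, and the
 * partition context c packs one above-neighbour and one left-neighbour
 * bit for that level, as computed at the top of decode_sb().
 * Hypothetical sketch: */
static av_unused int example_partition_ctx(uint8_t above, uint8_t left,
                                           enum BlockLevel bl)
{
    return ((above >> (3 - bl)) & 1) | (((left >> (3 - bl)) & 1) << 1);
}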
2805 static void loopfilter_sb(AVCodecContext *ctx, struct VP9Filter *lflvl,
2806 int row, int col, ptrdiff_t yoff, ptrdiff_t uvoff)
2808 VP9Context *s = ctx->priv_data;
2809 uint8_t *dst = s->f->data[0] + yoff, *lvl = lflvl->level;
2810 ptrdiff_t ls_y = s->f->linesize[0], ls_uv = s->f->linesize[1];
2813 // FIXME to what extent can we interleave the v/h loopfilter calls? E.g.
2814 // if you think of them as acting on a 8x8 block max, we can interleave
2815 // each v/h within the single x loop, but that only works if we work on
2816 // 8 pixel blocks, and we won't always do that (we want at least 16px
2817 // to use SSE2 optimizations, perhaps 32 for AVX2)
2819 // filter edges between columns, Y plane (e.g. block1 | block2)
2820 for (y = 0; y < 8; y += 2, dst += 16 * ls_y, lvl += 16) {
2821 uint8_t *ptr = dst, *l = lvl, *hmask1 = lflvl->mask[0][0][y];
2822 uint8_t *hmask2 = lflvl->mask[0][0][y + 1];
2823 unsigned hm1 = hmask1[0] | hmask1[1] | hmask1[2], hm13 = hmask1[3];
2824 unsigned hm2 = hmask2[1] | hmask2[2], hm23 = hmask2[3];
2825 unsigned hm = hm1 | hm2 | hm13 | hm23;
2827 for (x = 1; hm & ~(x - 1); x <<= 1, ptr += 8, l++) {
2829 int L = *l, H = L >> 4;
2830 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2833 if (hmask1[0] & x) {
2834 if (hmask2[0] & x) {
2835 av_assert2(l[8] == L);
2836 s->dsp.loop_filter_16[0](ptr, ls_y, E, I, H);
2838 s->dsp.loop_filter_8[2][0](ptr, ls_y, E, I, H);
2840 } else if (hm2 & x) {
2843 E |= s->filter.mblim_lut[L] << 8;
2844 I |= s->filter.lim_lut[L] << 8;
2845 s->dsp.loop_filter_mix2[!!(hmask1[1] & x)]
2847 [0](ptr, ls_y, E, I, H);
2849 s->dsp.loop_filter_8[!!(hmask1[1] & x)]
2850 [0](ptr, ls_y, E, I, H);
2853 } else if (hm2 & x) {
2854 int L = l[8], H = L >> 4;
2855 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2858 s->dsp.loop_filter_8[!!(hmask2[1] & x)]
2859 [0](ptr + 8 * ls_y, ls_y, E, I, H);
2863 int L = *l, H = L >> 4;
2864 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2869 E |= s->filter.mblim_lut[L] << 8;
2870 I |= s->filter.lim_lut[L] << 8;
2871 s->dsp.loop_filter_mix2[0][0][0](ptr + 4, ls_y, E, I, H);
2873 s->dsp.loop_filter_8[0][0](ptr + 4, ls_y, E, I, H);
2875 } else if (hm23 & x) {
2876 int L = l[8], H = L >> 4;
2877 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2879 s->dsp.loop_filter_8[0][0](ptr + 8 * ls_y + 4, ls_y, E, I, H);
2885 // filter edges between rows, Y plane (e.g. block1 above block2)
2887 dst = s->f->data[0] + yoff;
2889 for (y = 0; y < 8; y++, dst += 8 * ls_y, lvl += 8) {
2890 uint8_t *ptr = dst, *l = lvl, *vmask = lflvl->mask[0][1][y];
2891 unsigned vm = vmask[0] | vmask[1] | vmask[2], vm3 = vmask[3];
2893 for (x = 1; vm & ~(x - 1); x <<= 2, ptr += 16, l += 2) {
2896 int L = *l, H = L >> 4;
2897 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2900 if (vmask[0] & (x << 1)) {
2901 av_assert2(l[1] == L);
2902 s->dsp.loop_filter_16[1](ptr, ls_y, E, I, H);
2904 s->dsp.loop_filter_8[2][1](ptr, ls_y, E, I, H);
2906 } else if (vm & (x << 1)) {
2909 E |= s->filter.mblim_lut[L] << 8;
2910 I |= s->filter.lim_lut[L] << 8;
2911 s->dsp.loop_filter_mix2[!!(vmask[1] & x)]
2912 [!!(vmask[1] & (x << 1))]
2913 [1](ptr, ls_y, E, I, H);
2915 s->dsp.loop_filter_8[!!(vmask[1] & x)]
2916 [1](ptr, ls_y, E, I, H);
2918 } else if (vm & (x << 1)) {
2919 int L = l[1], H = L >> 4;
2920 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2922 s->dsp.loop_filter_8[!!(vmask[1] & (x << 1))]
2923 [1](ptr + 8, ls_y, E, I, H);
2927 int L = *l, H = L >> 4;
2928 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2930 if (vm3 & (x << 1)) {
2933 E |= s->filter.mblim_lut[L] << 8;
2934 I |= s->filter.lim_lut[L] << 8;
2935 s->dsp.loop_filter_mix2[0][0][1](ptr + ls_y * 4, ls_y, E, I, H);
2937 s->dsp.loop_filter_8[0][1](ptr + ls_y * 4, ls_y, E, I, H);
2939 } else if (vm3 & (x << 1)) {
2940 int L = l[1], H = L >> 4;
2941 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2943 s->dsp.loop_filter_8[0][1](ptr + ls_y * 4 + 8, ls_y, E, I, H);
2948 // same principle but for U/V planes
2949 for (p = 0; p < 2; p++) {
2951 dst = s->f->data[1 + p] + uvoff;
2952 for (y = 0; y < 8; y += 4, dst += 16 * ls_uv, lvl += 32) {
2953 uint8_t *ptr = dst, *l = lvl, *hmask1 = lflvl->mask[1][0][y];
2954 uint8_t *hmask2 = lflvl->mask[1][0][y + 2];
2955 unsigned hm1 = hmask1[0] | hmask1[1] | hmask1[2];
2956 unsigned hm2 = hmask2[1] | hmask2[2], hm = hm1 | hm2;
2958 for (x = 1; hm & ~(x - 1); x <<= 1, ptr += 4) {
2961 int L = *l, H = L >> 4;
2962 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2964 if (hmask1[0] & x) {
2965 if (hmask2[0] & x) {
2966 av_assert2(l[16] == L);
2967 s->dsp.loop_filter_16[0](ptr, ls_uv, E, I, H);
2969 s->dsp.loop_filter_8[2][0](ptr, ls_uv, E, I, H);
2971 } else if (hm2 & x) {
2974 E |= s->filter.mblim_lut[L] << 8;
2975 I |= s->filter.lim_lut[L] << 8;
2976 s->dsp.loop_filter_mix2[!!(hmask1[1] & x)]
2978 [0](ptr, ls_uv, E, I, H);
2980 s->dsp.loop_filter_8[!!(hmask1[1] & x)]
2981 [0](ptr, ls_uv, E, I, H);
2983 } else if (hm2 & x) {
2984 int L = l[16], H = L >> 4;
2985 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
2987 s->dsp.loop_filter_8[!!(hmask2[1] & x)]
2988 [0](ptr + 8 * ls_uv, ls_uv, E, I, H);
2996 dst = s->f->data[1 + p] + uvoff;
2997 for (y = 0; y < 8; y++, dst += 4 * ls_uv) {
2998 uint8_t *ptr = dst, *l = lvl, *vmask = lflvl->mask[1][1][y];
2999 unsigned vm = vmask[0] | vmask[1] | vmask[2];
3001 for (x = 1; vm & ~(x - 1); x <<= 4, ptr += 16, l += 4) {
3004 int L = *l, H = L >> 4;
3005 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
3008 if (vmask[0] & (x << 2)) {
3009 av_assert2(l[2] == L);
3010 s->dsp.loop_filter_16[1](ptr, ls_uv, E, I, H);
3012 s->dsp.loop_filter_8[2][1](ptr, ls_uv, E, I, H);
3014 } else if (vm & (x << 2)) {
3017 E |= s->filter.mblim_lut[L] << 8;
3018 I |= s->filter.lim_lut[L] << 8;
3019 s->dsp.loop_filter_mix2[!!(vmask[1] & x)]
3020 [!!(vmask[1] & (x << 2))]
3021 [1](ptr, ls_uv, E, I, H);
3023 s->dsp.loop_filter_8[!!(vmask[1] & x)]
3024 [1](ptr, ls_uv, E, I, H);
3026 } else if (vm & (x << 2)) {
3027 int L = l[2], H = L >> 4;
3028 int E = s->filter.mblim_lut[L], I = s->filter.lim_lut[L];
3030 s->dsp.loop_filter_8[!!(vmask[1] & (x << 2))]
3031 [1](ptr + 8, ls_uv, E, I, H);
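/* Illustrative note: the loop_filter_mix2 dsp functions filter two
 * adjacent edges in one call, so the thresholds of the second edge are
 * packed into the high bytes of E and I (the "E |= ... << 8" statements
 * above). Hypothetical sketch: */
static av_unused int example_pack_lf_thresholds(int first, int second)
{
    return first | (second << 8); /* low byte: 1st edge, high byte: 2nd */
}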
3041 static void set_tile_offset(int *start, int *end, int idx, int log2_n, int n)
3043 int sb_start = ( idx * n) >> log2_n;
3044 int sb_end = ((idx + 1) * n) >> log2_n;
3045 *start = FFMIN(sb_start, n) << 3;
3046 *end = FFMIN(sb_end, n) << 3;
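/* Usage sketch for set_tile_offset() with hypothetical numbers: two tile
 * columns (log2_n = 1) over n = 11 superblock units give tiles covering
 * superblocks [0, 5) and [5, 11), i.e. [0, 40) and [40, 88) in the 8px
 * block units produced by the << 3 above: */
static av_unused void example_tile_bounds(void)
{
    int start, end;
    set_tile_offset(&start, &end, 0, 1, 11); /* start == 0,  end == 40 */
    set_tile_offset(&start, &end, 1, 1, 11); /* start == 40, end == 88 */
}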
3049 static av_always_inline void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
3050 int max_count, int update_factor)
3052 unsigned ct = ct0 + ct1, p2, p1;
3058 p2 = ((ct0 << 8) + (ct >> 1)) / ct;
3059 p2 = av_clip(p2, 1, 255);
3060 ct = FFMIN(ct, max_count);
3061 update_factor = FASTDIV(update_factor * ct, max_count);
3063 // (p1 * (256 - update_factor) + p2 * update_factor + 128) >> 8
3064 *p = p1 + (((p2 - p1) * update_factor + 128) >> 8);
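/* Worked example for adapt_prob() with hypothetical inputs (old prob 128,
 * counts {30, 10}, max_count 20, update_factor 128); FASTDIV is replaced
 * by a plain division for clarity. The probability moves halfway towards
 * the observed frequency, 192/256: */
static av_unused uint8_t example_adapted_prob(void)
{
    unsigned ct0 = 30, ct1 = 10, ct = ct0 + ct1;
    unsigned p1 = 128;
    unsigned p2 = av_clip(((ct0 << 8) + (ct >> 1)) / ct, 1, 255); /* 192 */
    unsigned uf = 128 * FFMIN(ct, 20) / 20;                       /* 128 */
    return p1 + (((p2 - p1) * uf + 128) >> 8);                    /* 160 */
}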
3067 static void adapt_probs(VP9Context *s)
3070 prob_context *p = &s->prob_ctx[s->framectxid].p;
3071 int uf = (s->keyframe || s->intraonly || !s->last_keyframe) ? 112 : 128;
3074 for (i = 0; i < 4; i++)
3075 for (j = 0; j < 2; j++)
3076 for (k = 0; k < 2; k++)
3077 for (l = 0; l < 6; l++)
3078 for (m = 0; m < 6; m++) {
3079 uint8_t *pp = s->prob_ctx[s->framectxid].coef[i][j][k][l][m];
3080 unsigned *e = s->counts.eob[i][j][k][l][m];
3081 unsigned *c = s->counts.coef[i][j][k][l][m];
3083 if (l == 0 && m >= 3) // dc band only has 3 contexts
3086 adapt_prob(&pp[0], e[0], e[1], 24, uf);
3087 adapt_prob(&pp[1], c[0], c[1] + c[2], 24, uf);
3088 adapt_prob(&pp[2], c[1], c[2], 24, uf);
3091 if (s->keyframe || s->intraonly) {
3092 memcpy(p->skip, s->prob.p.skip, sizeof(p->skip));
3093 memcpy(p->tx32p, s->prob.p.tx32p, sizeof(p->tx32p));
3094 memcpy(p->tx16p, s->prob.p.tx16p, sizeof(p->tx16p));
3095 memcpy(p->tx8p, s->prob.p.tx8p, sizeof(p->tx8p));
3100 for (i = 0; i < 3; i++)
3101 adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);
3104 for (i = 0; i < 4; i++)
3105 adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);
3108 if (s->comppredmode == PRED_SWITCHABLE) {
3109 for (i = 0; i < 5; i++)
3110 adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
3114 if (s->comppredmode != PRED_SINGLEREF) {
3115 for (i = 0; i < 5; i++)
3116 adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
3117 s->counts.comp_ref[i][1], 20, 128);
3120 if (s->comppredmode != PRED_COMPREF) {
3121 for (i = 0; i < 5; i++) {
3122 uint8_t *pp = p->single_ref[i];
3123 unsigned (*c)[2] = s->counts.single_ref[i];
3125 adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
3126 adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
3130 // block partitioning
3131 for (i = 0; i < 4; i++)
3132 for (j = 0; j < 4; j++) {
3133 uint8_t *pp = p->partition[i][j];
3134 unsigned *c = s->counts.partition[i][j];
3136 adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
3137 adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
3138 adapt_prob(&pp[2], c[2], c[3], 20, 128);
3142 if (s->txfmmode == TX_SWITCHABLE) {
3143 for (i = 0; i < 2; i++) {
3144 unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];
3146 adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
3147 adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
3148 adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
3149 adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
3150 adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
3151 adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
3155 // interpolation filter
3156 if (s->filtermode == FILTER_SWITCHABLE) {
3157 for (i = 0; i < 4; i++) {
3158 uint8_t *pp = p->filter[i];
3159 unsigned *c = s->counts.filter[i];
3161 adapt_prob(&pp[0], c[0], c[1] + c[2], 20, 128);
3162 adapt_prob(&pp[1], c[1], c[2], 20, 128);
3167 for (i = 0; i < 7; i++) {
3168 uint8_t *pp = p->mv_mode[i];
3169 unsigned *c = s->counts.mv_mode[i];
3171 adapt_prob(&pp[0], c[2], c[1] + c[0] + c[3], 20, 128);
3172 adapt_prob(&pp[1], c[0], c[1] + c[3], 20, 128);
3173 adapt_prob(&pp[2], c[1], c[3], 20, 128);
3178 uint8_t *pp = p->mv_joint;
3179 unsigned *c = s->counts.mv_joint;
3181 adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
3182 adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
3183 adapt_prob(&pp[2], c[2], c[3], 20, 128);
3187 for (i = 0; i < 2; i++) {
3189 unsigned *c, (*c2)[2], sum;
3191 adapt_prob(&p->mv_comp[i].sign, s->counts.mv_comp[i].sign[0],
3192 s->counts.mv_comp[i].sign[1], 20, 128);