 * Copyright (C) 2012 - 2013 Guillaume Martres
 * Copyright (C) 2013 Anand Meher Kotra
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "hevc.h"

// L0/L1 candidate index pairs used to build the combined bi-predictive merge
// candidates, in the fixed order given by the HEVC specification
static const uint8_t l0_l1_cand_idx[12][2] = {
    { 0, 1 }, { 1, 0 }, { 0, 2 }, { 2, 0 },
    { 1, 2 }, { 2, 1 }, { 0, 3 }, { 3, 0 },
    { 1, 3 }, { 3, 1 }, { 2, 3 }, { 3, 2 },
};

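/*
 * Record which neighbouring positions of the current (x0, y0, nPbW x nPbH)
 * prediction block are available as candidates, based on the CTB edge flags
 * and the tile boundaries stored in the local context.
 */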
void ff_hevc_set_neighbour_available(HEVCContext *s, int x0, int y0,
                                     int nPbW, int nPbH)
{
    HEVCLocalContext *lc = &s->HEVClc;
    int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
    int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);

    lc->na.cand_up      = (lc->ctb_up_flag   || y0b);
    lc->na.cand_left    = (lc->ctb_left_flag || x0b);
    lc->na.cand_up_left = (!x0b && !y0b) ? lc->ctb_up_left_flag
                                         : lc->na.cand_left && lc->na.cand_up;
    lc->na.cand_up_right_sap =
        ((x0b + nPbW) == (1 << s->sps->log2_ctb_size)) ?
        lc->ctb_up_right_flag && !y0b : lc->na.cand_up;
    lc->na.cand_up_right =
        ((x0b + nPbW) == (1 << s->sps->log2_ctb_size) ?
         lc->ctb_up_right_flag && !y0b : lc->na.cand_up)
        && (x0 + nPbW) < lc->end_of_tiles_x;
    lc->na.cand_bottom_left = ((y0 + nPbH) >= lc->end_of_tiles_y) ? 0 : lc->na.cand_left;
}

/*
 * 6.4.1 Derivation process for z-scan order block availability
 */
static int z_scan_block_avail(HEVCContext *s, int xCurr, int yCurr,
                              int xN, int yN)
{
#define MIN_TB_ADDR_ZS(x, y) \
    s->pps->min_tb_addr_zs[(y) * s->sps->min_tb_width + (x)]
    int Curr = MIN_TB_ADDR_ZS(xCurr >> s->sps->log2_min_tb_size,
                              yCurr >> s->sps->log2_min_tb_size);
    int N;

    if (xN < 0 || yN < 0 ||
        xN >= s->sps->width ||
        yN >= s->sps->height)
        return 0;

    N = MIN_TB_ADDR_ZS(xN >> s->sps->log2_min_tb_size,
                       yN >> s->sps->log2_min_tb_size);

    return N <= Curr;
}

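/*
 * Part of the 6.4.2 check below: called when (xA1, yA1) falls inside the
 * current CU, it decides whether that position may still be used as a
 * candidate (it may not when the current PU is the second partition,
 * partIdx == 1, of a half-size split).
 */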
static int same_prediction_block(HEVCLocalContext *lc, int log2_cb_size,
                                 int x0, int y0, int nPbW, int nPbH,
                                 int xA1, int yA1, int partIdx)
{
    return !(nPbW << 1 == 1 << log2_cb_size &&
             nPbH << 1 == 1 << log2_cb_size && partIdx == 1 &&
             lc->cu.x + nPbW > xA1 &&
             lc->cu.y + nPbH <= yA1);
}

/*
 * 6.4.2 Derivation process for prediction block availability
 */
static int check_prediction_block_available(HEVCContext *s, int log2_cb_size,
                                            int x0, int y0, int nPbW, int nPbH,
                                            int xA1, int yA1, int partIdx)
{
    HEVCLocalContext *lc = &s->HEVClc;

    if (lc->cu.x < xA1 && lc->cu.y < yA1 &&
        (lc->cu.x + (1 << log2_cb_size)) > xA1 &&
        (lc->cu.y + (1 << log2_cb_size)) > yA1)
        return same_prediction_block(lc, log2_cb_size, x0, y0,
                                     nPbW, nPbH, xA1, yA1, partIdx);

    return z_scan_block_avail(s, x0, y0, xA1, yA1);
}

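/*
 * Note: despite its name, a non-zero return from isDiffMER() means the two
 * luma positions share the same merge estimation region (MER), as defined by
 * log2_parallel_merge_level; callers then mark the spatial candidate as
 * unavailable, which is what the parallel merge rule requires.
 */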
// check whether the two luma locations belong to the same motion estimation region
static int isDiffMER(HEVCContext *s, int xN, int yN, int xP, int yP)
{
    uint8_t plevel = s->pps->log2_parallel_merge_level;

    return xN >> plevel == xP >> plevel &&
           yN >> plevel == yP >> plevel;
}

#define MATCH(x) (A.x == B.x)

// check whether A and B carry the same motion vectors and reference indices
static int compareMVrefidx(struct MvField A, struct MvField B)
{
    if (A.pred_flag[0] && A.pred_flag[1] && B.pred_flag[0] && B.pred_flag[1])
        return MATCH(ref_idx[0]) && MATCH(mv[0].x) && MATCH(mv[0].y) &&
               MATCH(ref_idx[1]) && MATCH(mv[1].x) && MATCH(mv[1].y);

    if (A.pred_flag[0] && !A.pred_flag[1] && B.pred_flag[0] && !B.pred_flag[1])
        return MATCH(ref_idx[0]) && MATCH(mv[0].x) && MATCH(mv[0].y);

    if (!A.pred_flag[0] && A.pred_flag[1] && !B.pred_flag[0] && B.pred_flag[1])
        return MATCH(ref_idx[1]) && MATCH(mv[1].x) && MATCH(mv[1].y);

    return 0;
}

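/*
 * Temporal MV scaling as in the spec: td and tb are the clipped POC distances
 * of the collocated and the current reference picture, tx ~= 16384 / td, and
 * the vector is multiplied by
 * distScaleFactor = Clip3(-4096, 4095, (tb * tx + 32) >> 6)
 * with symmetric rounding. With td == tb the factor comes out at (or very
 * near) 256, i.e. unity in this 8-bit fixed-point scale, leaving the vector
 * essentially unchanged.
 */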
static av_always_inline void mv_scale(Mv *dst, Mv *src, int td, int tb)
{
    int tx, scale_factor;

    td           = av_clip_int8_c(td);
    tb           = av_clip_int8_c(tb);
    tx           = (0x4000 + abs(td / 2)) / td;
    scale_factor = av_clip_c((tb * tx + 32) >> 6, -4096, 4095);
    dst->x = av_clip_int16_c((scale_factor * src->x + 127 +
                              (scale_factor * src->x < 0)) >> 8);
    dst->y = av_clip_int16_c((scale_factor * src->y + 127 +
                              (scale_factor * src->y < 0)) >> 8);
}

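/*
 * Map one collocated MV onto the current reference: long-term/short-term
 * types must match; the MV is copied as-is for long-term references or equal
 * POC distances, and rescaled with mv_scale() otherwise.
 */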
static int check_mvset(Mv *mvLXCol, Mv *mvCol,
                       int colPic, int poc,
                       RefPicList *refPicList, int X, int refIdxLx,
                       RefPicList *refPicList_col, int listCol, int refidxCol)
{
    int cur_lt = refPicList[X].isLongTerm[refIdxLx];
    int col_lt = refPicList_col[listCol].isLongTerm[refidxCol];
    int col_poc_diff, cur_poc_diff;

    if (cur_lt != col_lt) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    col_poc_diff = colPic - refPicList_col[listCol].list[refidxCol];
    cur_poc_diff = poc    - refPicList[X].list[refIdxLx];

    if (!col_poc_diff)
        col_poc_diff = 1; // error resilience

    if (cur_lt || col_poc_diff == cur_poc_diff) {
        mvLXCol->x = mvCol->x;
        mvLXCol->y = mvCol->y;
    } else
        mv_scale(mvLXCol, mvCol, col_poc_diff, cur_poc_diff);

    return 1;
}

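/* CHECK_MVSET(l): try to derive mvLXCol from list l of the collocated block. */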
#define CHECK_MVSET(l)                                    \
    check_mvset(mvLXCol, temp_col.mv + l,                 \
                colPic, s->poc,                           \
                refPicList, X, refIdxLx,                  \
                refPicList_col, L ## l, temp_col.ref_idx[l])

// derive the collocated motion vectors, section 8.5.3.1.8
static int derive_temporal_colocated_mvs(HEVCContext *s, MvField temp_col,
                                         int refIdxLx, Mv *mvLXCol, int X,
                                         int colPic, RefPicList *refPicList_col)
{
    RefPicList *refPicList = s->ref->refPicList;

    if (temp_col.is_intra) {
        mvLXCol->x = 0;
        mvLXCol->y = 0;
        return 0;
    }

    if (temp_col.pred_flag[0] == 0)
        return CHECK_MVSET(1);
    else if (temp_col.pred_flag[0] == 1 && temp_col.pred_flag[1] == 0)
        return CHECK_MVSET(0);
    else if (temp_col.pred_flag[0] == 1 && temp_col.pred_flag[1] == 1) {
        int check_diffpicount = 0;
        int i;

        // count references that follow the current picture in output order
        for (i = 0; i < refPicList[0].nb_refs; i++) {
            if (refPicList[0].list[i] > s->poc)
                check_diffpicount++;
        }
        for (i = 0; i < refPicList[1].nb_refs; i++) {
            if (refPicList[1].list[i] > s->poc)
                check_diffpicount++;
        }
        if (check_diffpicount == 0 && X == 0)
            return CHECK_MVSET(0);
        else if (check_diffpicount == 0 && X == 1)
            return CHECK_MVSET(1);
        else {
            if (s->sh.collocated_list == L1)
                return CHECK_MVSET(0);
            else
                return CHECK_MVSET(1);
        }
    }

    return 0;
}

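/*
 * TAB_MVF() indexes a frame's motion vector field in min-PU units;
 * TAB_MVF_PU() does the same for the named candidate positions (A0, A1,
 * B0, B1, B2) whose *_pu coordinates are computed by the callers below.
 */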
#define TAB_MVF(x, y) \
    tab_mvf[(y) * min_pu_width + x]

#define TAB_MVF_PU(v) \
    TAB_MVF(x ## v ## _pu, y ## v ## _pu)

#define DERIVE_TEMPORAL_COLOCATED_MVS                           \
    derive_temporal_colocated_mvs(s, temp_col,                  \
                                  refIdxLx, mvLXCol, X, colPic, \
                                  ff_hevc_get_ref_list(s, ref, x, y))

/*
 * 8.5.3.1.7 temporal luma motion vector prediction
 */
static int temporal_luma_motion_vector(HEVCContext *s, int x0, int y0,
                                       int nPbW, int nPbH, int refIdxLx,
                                       Mv *mvLXCol, int X)
{
    MvField *tab_mvf;
    MvField temp_col;
    int x, y, x_pu, y_pu;
    int min_pu_width       = s->sps->min_pu_width;
    int availableFlagLXCol = 0;
    int colPic;

    HEVCFrame *ref = s->ref->collocated_ref;

    if (!ref)
        return 0;

    tab_mvf = ref->tab_mvf;
    colPic  = ref->poc;

    // bottom-right collocated motion vector
    x = x0 + nPbW;
    y = y0 + nPbH;

    ff_thread_await_progress(&ref->tf, y, 0);
    if (tab_mvf &&
        (y0 >> s->sps->log2_ctb_size) == (y >> s->sps->log2_ctb_size) &&
        y < s->sps->height &&
        x < s->sps->width) {
        x                 &= ~15;
        y                 &= ~15;
        x_pu               = x >> s->sps->log2_min_pu_size;
        y_pu               = y >> s->sps->log2_min_pu_size;
        temp_col           = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }

    // derive center collocated motion vector
    if (tab_mvf && !availableFlagLXCol) {
        x                  = x0 + (nPbW >> 1);
        y                  = y0 + (nPbH >> 1);
        x                 &= ~15;
        y                 &= ~15;
        x_pu               = x >> s->sps->log2_min_pu_size;
        y_pu               = y >> s->sps->log2_min_pu_size;
        temp_col           = TAB_MVF(x_pu, y_pu);
        availableFlagLXCol = DERIVE_TEMPORAL_COLOCATED_MVS;
    }

    return availableFlagLXCol;
}

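/*
 * A candidate position is usable when the corresponding neighbour flag is
 * set and the PU covering it is not intra coded; PRED_BLOCK_AVAILABLE()
 * additionally applies the 6.4.2 prediction block availability rules.
 */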
#define AVAILABLE(cand, v) \
    (cand && !TAB_MVF_PU(v).is_intra)

#define PRED_BLOCK_AVAILABLE(v)                          \
    check_prediction_block_available(s, log2_cb_size,    \
                                     x0, y0, nPbW, nPbH, \
                                     x ## v, y ## v, part_idx)

#define COMPARE_MV_REFIDX(a, b) \
    compareMVrefidx(TAB_MVF_PU(a), TAB_MVF_PU(b))

/*
 * 8.5.3.1.2 Derivation process for spatial merging candidates
 */
static void derive_spatial_merge_candidates(HEVCContext *s, int x0, int y0,
                                            int nPbW, int nPbH,
                                            int log2_cb_size,
                                            int singleMCLFlag, int part_idx,
                                            int merge_idx,
                                            struct MvField mergecandlist[])
{
    HEVCLocalContext *lc   = &s->HEVClc;
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf       = s->ref->tab_mvf;

    const int min_pu_width = s->sps->min_pu_width;

    const int cand_bottom_left = lc->na.cand_bottom_left;
    const int cand_left        = lc->na.cand_left;
    const int cand_up_left     = lc->na.cand_up_left;
    const int cand_up          = lc->na.cand_up;
    const int cand_up_right    = lc->na.cand_up_right_sap;

    const int xA1    = x0 - 1;
    const int yA1    = y0 + nPbH - 1;
    const int xA1_pu = xA1 >> s->sps->log2_min_pu_size;
    const int yA1_pu = yA1 >> s->sps->log2_min_pu_size;

    const int xB1    = x0 + nPbW - 1;
    const int yB1    = y0 - 1;
    const int xB1_pu = xB1 >> s->sps->log2_min_pu_size;
    const int yB1_pu = yB1 >> s->sps->log2_min_pu_size;

    const int xB0    = x0 + nPbW;
    const int yB0    = y0 - 1;
    const int xB0_pu = xB0 >> s->sps->log2_min_pu_size;
    const int yB0_pu = yB0 >> s->sps->log2_min_pu_size;

    const int xA0    = x0 - 1;
    const int yA0    = y0 + nPbH;
    const int xA0_pu = xA0 >> s->sps->log2_min_pu_size;
    const int yA0_pu = yA0 >> s->sps->log2_min_pu_size;

    const int xB2    = x0 - 1;
    const int yB2    = y0 - 1;
    const int xB2_pu = xB2 >> s->sps->log2_min_pu_size;
    const int yB2_pu = yB2 >> s->sps->log2_min_pu_size;

    const int nb_refs = (s->sh.slice_type == P_SLICE) ?
                        s->sh.nb_refs[0] : FFMIN(s->sh.nb_refs[0], s->sh.nb_refs[1]);
    int check_MER   = 1;
    int check_MER_1 = 1;

    int zero_idx = 0;

    int nb_merge_cand      = 0;
    int nb_orig_merge_cand = 0;

    int is_available_a0;
    int is_available_a1;
    int is_available_b0;
    int is_available_b1;
    int is_available_b2;
    int check_B0;
    int check_A0;

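    /*
     * Candidates are appended in the order required by 8.5.3.1.2:
     * A1, B1, B0, A0, B2, the temporal candidate, combined bi-predictive
     * candidates (B slices only) and finally zero-MV candidates, stopping as
     * soon as the requested merge_idx has been produced.
     */
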
    // first left spatial merge candidate (A1)
    is_available_a1 = AVAILABLE(cand_left, A1);

    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_Nx2N ||
         lc->cu.part_mode == PART_nLx2N ||
         lc->cu.part_mode == PART_nRx2N) ||
        isDiffMER(s, xA1, yA1, x0, y0)) {
        is_available_a1 = 0;
    }

    if (is_available_a1) {
        mergecandlist[0] = TAB_MVF_PU(A1);
        if (merge_idx == 0)
            return;
        nb_merge_cand++;
    }

    // above spatial merge candidate (B1)
    is_available_b1 = AVAILABLE(cand_up, B1);

    if (!singleMCLFlag && part_idx == 1 &&
        (lc->cu.part_mode == PART_2NxN ||
         lc->cu.part_mode == PART_2NxnU ||
         lc->cu.part_mode == PART_2NxnD) ||
        isDiffMER(s, xB1, yB1, x0, y0)) {
        is_available_b1 = 0;
    }

    if (is_available_a1 && is_available_b1)
        check_MER = !COMPARE_MV_REFIDX(B1, A1);

    if (is_available_b1 && check_MER)
        mergecandlist[nb_merge_cand++] = TAB_MVF_PU(B1);

    // above right spatial merge candidate (B0)
    check_B0 = PRED_BLOCK_AVAILABLE(B0);

    is_available_b0 = check_B0 && AVAILABLE(cand_up_right, B0);

    if (isDiffMER(s, xB0, yB0, x0, y0))
        is_available_b0 = 0;

    if (is_available_b1 && is_available_b0)
        check_MER = !COMPARE_MV_REFIDX(B0, B1);

    if (is_available_b0 && check_MER) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B0);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // bottom left spatial merge candidate (A0)
    check_A0 = PRED_BLOCK_AVAILABLE(A0);

    is_available_a0 = check_A0 && AVAILABLE(cand_bottom_left, A0);

    if (isDiffMER(s, xA0, yA0, x0, y0))
        is_available_a0 = 0;

    if (is_available_a1 && is_available_a0)
        check_MER = !COMPARE_MV_REFIDX(A0, A1);

    if (is_available_a0 && check_MER) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(A0);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // above left spatial merge candidate (B2)
    is_available_b2 = AVAILABLE(cand_up_left, B2);

    if (isDiffMER(s, xB2, yB2, x0, y0))
        is_available_b2 = 0;

    if (is_available_a1 && is_available_b2)
        check_MER = !COMPARE_MV_REFIDX(B2, A1);

    if (is_available_b1 && is_available_b2)
        check_MER_1 = !COMPARE_MV_REFIDX(B2, B1);

    if (is_available_b2 && check_MER && check_MER_1 && nb_merge_cand != 4) {
        mergecandlist[nb_merge_cand] = TAB_MVF_PU(B2);
        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
    }

    // temporal motion vector candidate
    if (s->sh.slice_temporal_mvp_enabled_flag &&
        nb_merge_cand < s->sh.max_num_merge_cand) {
        Mv mv_l0_col, mv_l1_col;
        int available_l0 = temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l0_col, 0);
        int available_l1 = (s->sh.slice_type == B_SLICE) ?
                           temporal_luma_motion_vector(s, x0, y0, nPbW, nPbH,
                                                       0, &mv_l1_col, 1) : 0;

        if (available_l0 || available_l1) {
            mergecandlist[nb_merge_cand].is_intra     = 0;
            mergecandlist[nb_merge_cand].pred_flag[0] = available_l0;
            mergecandlist[nb_merge_cand].pred_flag[1] = available_l1;
            if (available_l0) {
                mergecandlist[nb_merge_cand].mv[0]      = mv_l0_col;
                mergecandlist[nb_merge_cand].ref_idx[0] = 0;
            }
            if (available_l1) {
                mergecandlist[nb_merge_cand].mv[1]      = mv_l1_col;
                mergecandlist[nb_merge_cand].ref_idx[1] = 0;
            }
            if (merge_idx == nb_merge_cand)
                return;
            nb_merge_cand++;
        }
    }

    nb_orig_merge_cand = nb_merge_cand;

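    /*
     * Combined candidates pair the L0 part of one existing candidate with the
     * L1 part of another, in the fixed order given by l0_l1_cand_idx,
     * skipping combinations whose two halves would be identical (same
     * picture, same motion vector).
     */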
    // combined bi-predictive merge candidates (applies for B slices)
    if (s->sh.slice_type == B_SLICE && nb_orig_merge_cand > 1 &&
        nb_orig_merge_cand < s->sh.max_num_merge_cand) {
        int comb_idx;

        for (comb_idx = 0; nb_merge_cand < s->sh.max_num_merge_cand &&
             comb_idx < nb_orig_merge_cand * (nb_orig_merge_cand - 1); comb_idx++) {
            int l0_cand_idx = l0_l1_cand_idx[comb_idx][0];
            int l1_cand_idx = l0_l1_cand_idx[comb_idx][1];
            MvField l0_cand = mergecandlist[l0_cand_idx];
            MvField l1_cand = mergecandlist[l1_cand_idx];

            if (l0_cand.pred_flag[0] && l1_cand.pred_flag[1] &&
                (refPicList[0].list[l0_cand.ref_idx[0]] !=
                 refPicList[1].list[l1_cand.ref_idx[1]] ||
                 l0_cand.mv[0].x != l1_cand.mv[1].x ||
                 l0_cand.mv[0].y != l1_cand.mv[1].y)) {
                mergecandlist[nb_merge_cand].ref_idx[0]   = l0_cand.ref_idx[0];
                mergecandlist[nb_merge_cand].ref_idx[1]   = l1_cand.ref_idx[1];
                mergecandlist[nb_merge_cand].pred_flag[0] = 1;
                mergecandlist[nb_merge_cand].pred_flag[1] = 1;
                mergecandlist[nb_merge_cand].mv[0].x      = l0_cand.mv[0].x;
                mergecandlist[nb_merge_cand].mv[0].y      = l0_cand.mv[0].y;
                mergecandlist[nb_merge_cand].mv[1].x      = l1_cand.mv[1].x;
                mergecandlist[nb_merge_cand].mv[1].y      = l1_cand.mv[1].y;
                mergecandlist[nb_merge_cand].is_intra     = 0;
                if (merge_idx == nb_merge_cand)
                    return;
                nb_merge_cand++;
            }
        }
    }

    // append zero motion vector candidates
    while (nb_merge_cand < s->sh.max_num_merge_cand) {
        mergecandlist[nb_merge_cand].pred_flag[0] = 1;
        mergecandlist[nb_merge_cand].pred_flag[1] = s->sh.slice_type == B_SLICE;
        mergecandlist[nb_merge_cand].mv[0].x      = 0;
        mergecandlist[nb_merge_cand].mv[0].y      = 0;
        mergecandlist[nb_merge_cand].mv[1].x      = 0;
        mergecandlist[nb_merge_cand].mv[1].y      = 0;
        mergecandlist[nb_merge_cand].is_intra     = 0;
        mergecandlist[nb_merge_cand].ref_idx[0]   = zero_idx < nb_refs ? zero_idx : 0;
        mergecandlist[nb_merge_cand].ref_idx[1]   = zero_idx < nb_refs ? zero_idx : 0;

        if (merge_idx == nb_merge_cand)
            return;
        nb_merge_cand++;
        zero_idx++;
    }
}

/*
 * 8.5.3.1.1 Derivation process for luma motion vectors for merge mode
 */
void ff_hevc_luma_mv_merge_mode(HEVCContext *s, int x0, int y0, int nPbW,
                                int nPbH, int log2_cb_size, int part_idx,
                                int merge_idx, MvField *mv)
{
    int singleMCLFlag = 0;
    int nCS = 1 << log2_cb_size;
    struct MvField mergecand_list[MRG_MAX_NUM_CANDS] = { { { { 0 } } } };
    int nPbW2 = nPbW;
    int nPbH2 = nPbH;
    HEVCLocalContext *lc = &s->HEVClc;

    // small PUs inside an 8x8 CU share a single, CU-based merge candidate list
    if (s->pps->log2_parallel_merge_level > 2 && nCS == 8) {
        singleMCLFlag = 1;
        x0            = lc->cu.x;
        y0            = lc->cu.y;
        nPbW          = nCS;
        nPbH          = nCS;
        part_idx      = 0;
    }

    ff_hevc_set_neighbour_available(s, x0, y0, nPbW, nPbH);
    derive_spatial_merge_candidates(s, x0, y0, nPbW, nPbH, log2_cb_size,
                                    singleMCLFlag, part_idx,
                                    merge_idx, mergecand_list);

    // 8x4 and 4x8 prediction blocks must not use bi-prediction
    if (mergecand_list[merge_idx].pred_flag[0] == 1 &&
        mergecand_list[merge_idx].pred_flag[1] == 1 &&
        (nPbW2 + nPbH2) == 12) {
        mergecand_list[merge_idx].ref_idx[1]   = -1;
        mergecand_list[merge_idx].pred_flag[1] = 0;
    }

    *mv = mergecand_list[merge_idx];
}

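/*
 * Rescale a spatial AMVP candidate when the neighbouring PU references a
 * different picture than the one being predicted, using the same POC-distance
 * scaling as the temporal case.
 */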
static av_always_inline void dist_scale(HEVCContext *s, Mv *mv,
                                        int min_pu_width, int x, int y,
                                        int elist, int ref_idx_curr, int ref_idx)
{
    RefPicList *refPicList = s->ref->refPicList;
    MvField *tab_mvf       = s->ref->tab_mvf;
    int ref_pic_elist      = refPicList[elist].list[TAB_MVF(x, y).ref_idx[elist]];
    int ref_pic_curr       = refPicList[ref_idx_curr].list[ref_idx];

    if (ref_pic_elist != ref_pic_curr) {
        int poc_diff = s->poc - ref_pic_elist;
        if (!poc_diff)
            poc_diff = 1;
        mv_scale(mv, mv, poc_diff, s->poc - ref_pic_curr);
    }
}

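/*
 * AMVP spatial candidate, strict variant: accept the neighbour's MV only if
 * it uses the requested list and references exactly the same picture.
 */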
static int mv_mp_mode_mx(HEVCContext *s, int x, int y, int pred_flag_index,
                         Mv *mv, int ref_idx_curr, int ref_idx)
{
    MvField *tab_mvf = s->ref->tab_mvf;
    int min_pu_width = s->sps->min_pu_width;

    RefPicList *refPicList = s->ref->refPicList;

    if (TAB_MVF(x, y).pred_flag[pred_flag_index] == 1 &&
        refPicList[pred_flag_index].list[TAB_MVF(x, y).ref_idx[pred_flag_index]] == refPicList[ref_idx_curr].list[ref_idx]) {
        *mv = TAB_MVF(x, y).mv[pred_flag_index];
        return 1;
    }
    return 0;
}

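/*
 * AMVP spatial candidate, relaxed variant: the reference picture may differ
 * as long as its long-term/short-term type matches; short-term MVs are then
 * rescaled with dist_scale().
 */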
static int mv_mp_mode_mx_lt(HEVCContext *s, int x, int y, int pred_flag_index,
                            Mv *mv, int ref_idx_curr, int ref_idx)
{
    MvField *tab_mvf = s->ref->tab_mvf;
    int min_pu_width = s->sps->min_pu_width;

    RefPicList *refPicList = s->ref->refPicList;
    int currIsLongTerm     = refPicList[ref_idx_curr].isLongTerm[ref_idx];

    int colIsLongTerm =
        refPicList[pred_flag_index].isLongTerm[(TAB_MVF(x, y).ref_idx[pred_flag_index])];

    if (TAB_MVF(x, y).pred_flag[pred_flag_index] &&
        colIsLongTerm == currIsLongTerm) {
        *mv = TAB_MVF(x, y).mv[pred_flag_index];
        if (!colIsLongTerm)
            dist_scale(s, mv, min_pu_width, x, y,
                       pred_flag_index, ref_idx_curr, ref_idx);
        return 1;
    }
    return 0;
}

#define MP_MX(v, pred, mx) \
    mv_mp_mode_mx(s, x ## v ## _pu, y ## v ## _pu, pred, \
                  &mx, ref_idx_curr, ref_idx)

#define MP_MX_LT(v, pred, mx) \
    mv_mp_mode_mx_lt(s, x ## v ## _pu, y ## v ## _pu, pred, \
                     &mx, ref_idx_curr, ref_idx)

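/*
 * Build the AMVP (motion vector predictor) candidate list for list LX: up to
 * one candidate is taken from the left neighbours (A0, A1), one from the
 * above neighbours (B0, B1, B2), then a temporal candidate and zero MVs are
 * appended until two entries exist; mvp_lx_flag selects the one to use.
 */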
void ff_hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
                              int nPbH, int log2_cb_size, int part_idx,
                              int merge_idx, MvField *mv,
                              int mvp_lx_flag, int LX)
{
    HEVCLocalContext *lc  = &s->HEVClc;
    MvField *tab_mvf      = s->ref->tab_mvf;
    int isScaledFlag_L0   = 0;
    int availableFlagLXA0 = 0;
    int availableFlagLXB0 = 0;
    int numMVPCandLX      = 0;
    int min_pu_width      = s->sps->min_pu_width;

    int xA0, yA0;
    int xA0_pu, yA0_pu;
    int is_available_a0;

    int xA1, yA1;
    int xA1_pu, yA1_pu;
    int is_available_a1;

    int xB0, yB0;
    int xB0_pu, yB0_pu;
    int is_available_b0;

    int xB1, yB1;
    int xB1_pu = 0, yB1_pu = 0;
    int is_available_b1 = 0;

    int xB2, yB2;
    int xB2_pu = 0, yB2_pu = 0;
    int is_available_b2 = 0;
    Mv mvpcand_list[2] = { { 0 } };
    Mv mxA = { 0 };
    Mv mxB = { 0 };
    int ref_idx_curr = 0;
    int ref_idx      = 0;
    int pred_flag_index_l0;
    int pred_flag_index_l1;
    int x0b = x0 & ((1 << s->sps->log2_ctb_size) - 1);
    int y0b = y0 & ((1 << s->sps->log2_ctb_size) - 1);

    int cand_up      = (lc->ctb_up_flag   || y0b);
    int cand_left    = (lc->ctb_left_flag || x0b);
    int cand_up_left =
        (!x0b && !y0b) ? lc->ctb_up_left_flag : cand_left && cand_up;
    int cand_up_right =
        (x0b + nPbW == (1 << s->sps->log2_ctb_size) ||
         x0 + nPbW >= lc->end_of_tiles_x) ? lc->ctb_up_right_flag && !y0b
                                          : cand_up;
    int cand_bottom_left = (y0 + nPbH >= lc->end_of_tiles_y) ? 0 : cand_left;

    ref_idx_curr       = LX;
    ref_idx            = mv->ref_idx[LX];
    pred_flag_index_l0 = LX;
    pred_flag_index_l1 = !LX;

    // bottom left spatial candidate (A0)
    xA0 = x0 - 1;
    yA0 = y0 + nPbH;

    xA0_pu = xA0 >> s->sps->log2_min_pu_size;
    yA0_pu = yA0 >> s->sps->log2_min_pu_size;

    is_available_a0 = PRED_BLOCK_AVAILABLE(A0) && AVAILABLE(cand_bottom_left, A0);

    // left spatial candidate (A1)
    xA1 = x0 - 1;
    yA1 = y0 + nPbH - 1;

    xA1_pu = xA1 >> s->sps->log2_min_pu_size;
    yA1_pu = yA1 >> s->sps->log2_min_pu_size;

    is_available_a1 = AVAILABLE(cand_left, A1);
    if (is_available_a0 || is_available_a1)
        isScaledFlag_L0 = 1;

    // first pass: require the neighbour to reference the same picture
    if (is_available_a0) {
        availableFlagLXA0 = MP_MX(A0, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX(A0, pred_flag_index_l1, mxA);
    }

    if (is_available_a1 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX(A1, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX(A1, pred_flag_index_l1, mxA);
    }

    // second pass: allow a different reference and scale the vector
    if (is_available_a0 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX_LT(A0, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX_LT(A0, pred_flag_index_l1, mxA);
    }

    if (is_available_a1 && !availableFlagLXA0) {
        availableFlagLXA0 = MP_MX_LT(A1, pred_flag_index_l0, mxA);
        if (!availableFlagLXA0)
            availableFlagLXA0 = MP_MX_LT(A1, pred_flag_index_l1, mxA);
    }

    if (availableFlagLXA0 && !mvp_lx_flag) {
        mv->mv[LX] = mxA;
        return;
    }

    // above right spatial candidate (B0)
    xB0 = x0 + nPbW;
    yB0 = y0 - 1;

    xB0_pu = xB0 >> s->sps->log2_min_pu_size;
    yB0_pu = yB0 >> s->sps->log2_min_pu_size;

    is_available_b0 = PRED_BLOCK_AVAILABLE(B0) && AVAILABLE(cand_up_right, B0);

    if (is_available_b0) {
        availableFlagLXB0 = MP_MX(B0, pred_flag_index_l0, mxB);
        if (!availableFlagLXB0)
            availableFlagLXB0 = MP_MX(B0, pred_flag_index_l1, mxB);
    }

    if (!availableFlagLXB0) {
        // above spatial candidate (B1)
        xB1 = x0 + nPbW - 1;
        yB1 = y0 - 1;

        xB1_pu = xB1 >> s->sps->log2_min_pu_size;
        yB1_pu = yB1 >> s->sps->log2_min_pu_size;

        is_available_b1 = AVAILABLE(cand_up, B1);

        if (is_available_b1) {
            availableFlagLXB0 = MP_MX(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX(B1, pred_flag_index_l1, mxB);
        }
    }

    if (!availableFlagLXB0) {
        // above left spatial candidate (B2)
        xB2 = x0 - 1;
        yB2 = y0 - 1;

        xB2_pu = xB2 >> s->sps->log2_min_pu_size;
        yB2_pu = yB2 >> s->sps->log2_min_pu_size;
        is_available_b2 = AVAILABLE(cand_up_left, B2);

        if (is_available_b2) {
            availableFlagLXB0 = MP_MX(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX(B2, pred_flag_index_l1, mxB);
        }
    }

    // no A candidate was available: promote the unscaled B candidate to A and
    // retry the B positions allowing reference scaling
    if (isScaledFlag_L0 == 0) {
        if (availableFlagLXB0) {
            availableFlagLXA0 = 1;
            mxA = mxB;
        }
        availableFlagLXB0 = 0;

        if (is_available_b0) {
            availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B0, pred_flag_index_l1, mxB);
        }

        if (is_available_b1 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B1, pred_flag_index_l1, mxB);
        }

        if (is_available_b2 && !availableFlagLXB0) {
            availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l0, mxB);
            if (!availableFlagLXB0)
                availableFlagLXB0 = MP_MX_LT(B2, pred_flag_index_l1, mxB);
        }
    }

    if (availableFlagLXA0)
        mvpcand_list[numMVPCandLX++] = mxA;

    if (availableFlagLXB0 && (!availableFlagLXA0 || mxA.x != mxB.x || mxA.y != mxB.y))
        mvpcand_list[numMVPCandLX++] = mxB;

    // temporal motion vector prediction candidate
    if (numMVPCandLX < 2 && s->sh.slice_temporal_mvp_enabled_flag) {
        Mv mv_col;
        int available_col = temporal_luma_motion_vector(s, x0, y0, nPbW,
                                                        nPbH, ref_idx,
                                                        &mv_col, LX);
        if (available_col)
            mvpcand_list[numMVPCandLX++] = mv_col;
    }

    // insert zero motion vectors when the number of available candidates is less than 2
    while (numMVPCandLX < 2)
        mvpcand_list[numMVPCandLX++] = (Mv){ 0, 0 };

    mv->mv[LX].x = mvpcand_list[mvp_lx_flag].x;
    mv->mv[LX].y = mvpcand_list[mvp_lx_flag].y;
}