/*
 * HEVC video decoder
 *
 * Copyright (C) 2012 - 2013 Guillaume Martres
 * Copyright (C) 2013 Seppo Tomperi
 * Copyright (C) 2013 Wassim Hamidouche
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/internal.h"

#include "cabac_functions.h"
#include "hevcdec.h"

#define LUMA 0
#define CB 1
#define CR 2

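/* Deblocking lookup tables: tc' and beta' thresholds indexed by (clipped) QP,
 * used below by TC_CALC(), chroma_tc() and deblocking_filter_CTB(). */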
static const uint8_t tctable[54] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,  // QP  0...18
    1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4,  // QP 19...37
    5, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 22, 24   // QP 38...53
};

static const uint8_t betatable[52] = {
     0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  6,  7,  8, // QP 0...18
     9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, // QP 19...37
    38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60, 62, 64                      // QP 38...51
};

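/* Derive the chroma tc threshold for deblocking: apply the PPS cb/cr QP offset
 * to the averaged luma QP, map it through the 4:2:0 chroma QP table when
 * applicable, then index tctable with the intra and slice tc offsets added. */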
static int chroma_tc(HEVCContext *s, int qp_y, int c_idx, int tc_offset)
{
    static const int qp_c[] = {
        29, 30, 31, 32, 33, 33, 34, 34, 35, 35, 36, 36, 37, 37
    };
    int qp, qp_i, offset, idxt;

    // slice qp offset is not used for deblocking
    if (c_idx == 1)
        offset = s->ps.pps->cb_qp_offset;
    else
        offset = s->ps.pps->cr_qp_offset;

    qp_i = av_clip(qp_y + offset, 0, 57);
    if (s->ps.sps->chroma_format_idc == 1) {
        if (qp_i < 30)
            qp = qp_i;
        else if (qp_i > 43)
            qp = qp_i - 6;
        else
            qp = qp_c[qp_i - 30];
    } else {
        qp = av_clip(qp_i, 0, 51);
    }

    idxt = av_clip(qp + DEFAULT_INTRA_TC_OFFSET + tc_offset, 0, 53);
    return tctable[idxt];
}

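/* Predict the luma QP of the current quantization group by averaging the QP
 * of the coding blocks to its left and above; neighbours outside the current
 * CTB fall back to the previously derived prediction (or the slice QP for the
 * first quantization group). */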
static int get_qPy_pred(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
{
    HEVCLocalContext *lc     = s->HEVClc;
    int ctb_size_mask        = (1 << s->ps.sps->log2_ctb_size) - 1;
    int MinCuQpDeltaSizeMask = (1 << (s->ps.sps->log2_ctb_size -
                                      s->ps.pps->diff_cu_qp_delta_depth)) - 1;
    int xQgBase              = xBase - (xBase & MinCuQpDeltaSizeMask);
    int yQgBase              = yBase - (yBase & MinCuQpDeltaSizeMask);
    int min_cb_width         = s->ps.sps->min_cb_width;
    int x_cb                 = xQgBase >> s->ps.sps->log2_min_cb_size;
    int y_cb                 = yQgBase >> s->ps.sps->log2_min_cb_size;
    int availableA           = (xBase   & ctb_size_mask) &&
                               (xQgBase & ctb_size_mask);
    int availableB           = (yBase   & ctb_size_mask) &&
                               (yQgBase & ctb_size_mask);
    int qPy_pred, qPy_a, qPy_b;

    // qPy_pred
    if (lc->first_qp_group || (!xQgBase && !yQgBase)) {
        lc->first_qp_group = !lc->tu.is_cu_qp_delta_coded;
        qPy_pred = s->sh.slice_qp;
    } else {
        qPy_pred = lc->qPy_pred;
    }

    // qPy_a
    if (availableA == 0)
        qPy_a = qPy_pred;
    else
        qPy_a = s->qp_y_tab[(x_cb - 1) + y_cb * min_cb_width];

    // qPy_b
    if (availableB == 0)
        qPy_b = qPy_pred;
    else
        qPy_b = s->qp_y_tab[x_cb + (y_cb - 1) * min_cb_width];

    av_assert2(qPy_a >= -s->ps.sps->qp_bd_offset && qPy_a < 52);
    av_assert2(qPy_b >= -s->ps.sps->qp_bd_offset && qPy_b < 52);

    return (qPy_a + qPy_b + 1) >> 1;
}

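/* Add the decoded cu_qp_delta to the predicted QP and store the result for
 * the current CU, wrapping it into the range allowed by the bit depth. */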
void ff_hevc_set_qPy(HEVCContext *s, int xBase, int yBase, int log2_cb_size)
{
    int qp_y = get_qPy_pred(s, xBase, yBase, log2_cb_size);

    if (s->HEVClc->tu.cu_qp_delta != 0) {
        int off = s->ps.sps->qp_bd_offset;
        s->HEVClc->qp_y = FFUMOD(qp_y + s->HEVClc->tu.cu_qp_delta + 52 + 2 * off,
                                 52 + off) - off;
    } else
        s->HEVClc->qp_y = qp_y;
}

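/* Look up the stored luma QP of the coding block covering luma sample (xC, yC). */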
static int get_qPy(HEVCContext *s, int xC, int yC)
{
    int log2_min_cb_size = s->ps.sps->log2_min_cb_size;
    int x                = xC >> log2_min_cb_size;
    int y                = yC >> log2_min_cb_size;
    return s->qp_y_tab[x + y * s->ps.sps->min_cb_width];
}

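/* Copy a rectangle of samples between two buffers, using unaligned 8-byte or
 * aligned 16-byte copies depending on pointer and stride alignment. */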
static void copy_CTB(uint8_t *dst, const uint8_t *src, int width, int height,
                     ptrdiff_t stride_dst, ptrdiff_t stride_src)
{
    int i, j;

    if (((intptr_t)dst | (intptr_t)src | stride_dst | stride_src) & 15) {
        for (i = 0; i < height; i++) {
            for (j = 0; j < width; j+=8)
                AV_COPY64U(dst+j, src+j);
            dst += stride_dst;
            src += stride_src;
        }
    } else {
        for (i = 0; i < height; i++) {
            for (j = 0; j < width; j+=16)
                AV_COPY128(dst+j, src+j);
            dst += stride_dst;
            src += stride_src;
        }
    }
}

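/* Copy a single sample (1 or 2 bytes wide depending on pixel_shift). */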
static void copy_pixel(uint8_t *dst, const uint8_t *src, int pixel_shift)
{
    if (pixel_shift)
        *(uint16_t *)dst = *(uint16_t *)src;
    else
        *dst = *src;
}

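/* Copy one column of samples between buffers with independent strides. */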
static void copy_vert(uint8_t *dst, const uint8_t *src,
                      int pixel_shift, int height,
                      ptrdiff_t stride_dst, ptrdiff_t stride_src)
{
    int i;
    if (pixel_shift == 0) {
        for (i = 0; i < height; i++) {
            *dst = *src;
            dst += stride_dst;
            src += stride_src;
        }
    } else {
        for (i = 0; i < height; i++) {
            *(uint16_t *)dst = *(uint16_t *)src;
            dst += stride_dst;
            src += stride_src;
        }
    }
}

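/* Save the top/bottom rows and left/right columns of a CTB into the SAO line
 * buffers, so that neighbouring CTBs can still read these samples after SAO
 * has been applied in place. */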
static void copy_CTB_to_hv(HEVCContext *s, const uint8_t *src,
                           ptrdiff_t stride_src, int x, int y, int width, int height,
                           int c_idx, int x_ctb, int y_ctb)
{
    int sh = s->ps.sps->pixel_shift;
    int w = s->ps.sps->width >> s->ps.sps->hshift[c_idx];
    int h = s->ps.sps->height >> s->ps.sps->vshift[c_idx];

    /* copy horizontal edges */
    memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb) * w + x) << sh),
           src, width << sh);
    memcpy(s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 1) * w + x) << sh),
           src + stride_src * (height - 1), width << sh);

    /* copy vertical edges */
    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb) * h + y) << sh), src, sh, height, 1 << sh, stride_src);

    copy_vert(s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 1) * h + y) << sh), src + ((width - 1) << sh), sh, height, 1 << sh, stride_src);
}

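/* After SAO, copy the unfiltered samples back into the frame for PCM and
 * transquant-bypass blocks whose in-loop filtering is disabled. */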
static void restore_tqb_pixels(HEVCContext *s,
                               uint8_t *src1, const uint8_t *dst1,
                               ptrdiff_t stride_src, ptrdiff_t stride_dst,
                               int x0, int y0, int width, int height, int c_idx)
{
    if ( s->ps.pps->transquant_bypass_enable_flag ||
            (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) {
        int x, y;
        int min_pu_size  = 1 << s->ps.sps->log2_min_pu_size;
        int hshift       = s->ps.sps->hshift[c_idx];
        int vshift       = s->ps.sps->vshift[c_idx];
        int x_min        = ((x0         ) >> s->ps.sps->log2_min_pu_size);
        int y_min        = ((y0         ) >> s->ps.sps->log2_min_pu_size);
        int x_max        = ((x0 + width ) >> s->ps.sps->log2_min_pu_size);
        int y_max        = ((y0 + height) >> s->ps.sps->log2_min_pu_size);
        int len          = (min_pu_size >> hshift) << s->ps.sps->pixel_shift;
        for (y = y_min; y < y_max; y++) {
            for (x = x_min; x < x_max; x++) {
                if (s->is_pcm[y * s->ps.sps->min_pu_width + x]) {
                    int n;
                    uint8_t *src = src1 + (((y << s->ps.sps->log2_min_pu_size) - y0) >> vshift) * stride_src + ((((x << s->ps.sps->log2_min_pu_size) - x0) >> hshift) << s->ps.sps->pixel_shift);
                    const uint8_t *dst = dst1 + (((y << s->ps.sps->log2_min_pu_size) - y0) >> vshift) * stride_dst + ((((x << s->ps.sps->log2_min_pu_size) - x0) >> hshift) << s->ps.sps->pixel_shift);
                    for (n = 0; n < (min_pu_size >> vshift); n++) {
                        memcpy(src, dst, len);
                        src += stride_src;
                        dst += stride_dst;
                    }
                }
            }
        }
    }
}

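/*
 * Apply SAO to one CTB for all components. Unfiltered border samples are
 * gathered from the neighbouring CTBs (or from the SAO line buffers when a
 * neighbour was already filtered in place), and edges shared with slices or
 * tiles that disallow cross-boundary filtering are flagged so the edge
 * restore functions can leave them untouched.
 */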
#define CTB(tab, x, y) ((tab)[(y) * s->ps.sps->ctb_width + (x)])

static void sao_filter_CTB(HEVCContext *s, int x, int y)
{
    static const uint8_t sao_tab[8] = { 0, 1, 2, 2, 3, 3, 4, 4 };
    HEVCLocalContext *lc = s->HEVClc;
    int c_idx;
    int edges[4];  // 0 left 1 top 2 right 3 bottom
    int x_ctb                = x >> s->ps.sps->log2_ctb_size;
    int y_ctb                = y >> s->ps.sps->log2_ctb_size;
    int ctb_addr_rs          = y_ctb * s->ps.sps->ctb_width + x_ctb;
    int ctb_addr_ts          = s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs];
    SAOParams *sao           = &CTB(s->sao, x_ctb, y_ctb);
    // flags indicating unfilterable edges
    uint8_t vert_edge[]      = { 0, 0 };
    uint8_t horiz_edge[]     = { 0, 0 };
    uint8_t diag_edge[]      = { 0, 0, 0, 0 };
    uint8_t lfase            = CTB(s->filter_slice_edges, x_ctb, y_ctb);
    uint8_t no_tile_filter   = s->ps.pps->tiles_enabled_flag &&
                               !s->ps.pps->loop_filter_across_tiles_enabled_flag;
    uint8_t restore          = no_tile_filter || !lfase;
    uint8_t left_tile_edge   = 0;
    uint8_t right_tile_edge  = 0;
    uint8_t up_tile_edge     = 0;
    uint8_t bottom_tile_edge = 0;

    edges[0] = x_ctb == 0;
    edges[1] = y_ctb == 0;
    edges[2] = x_ctb == s->ps.sps->ctb_width  - 1;
    edges[3] = y_ctb == s->ps.sps->ctb_height - 1;

    if (restore) {
        if (!edges[0]) {
            left_tile_edge  = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs-1]];
            vert_edge[0]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb)) || left_tile_edge;
        }
        if (!edges[2]) {
            right_tile_edge = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs+1]];
            vert_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb)) || right_tile_edge;
        }
        if (!edges[1]) {
            up_tile_edge     = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs - s->ps.sps->ctb_width]];
            horiz_edge[0]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb - 1)) || up_tile_edge;
        }
        if (!edges[3]) {
            bottom_tile_edge = no_tile_filter && s->ps.pps->tile_id[ctb_addr_ts] != s->ps.pps->tile_id[s->ps.pps->ctb_addr_rs_to_ts[ctb_addr_rs + s->ps.sps->ctb_width]];
            horiz_edge[1]    = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb, y_ctb + 1)) || bottom_tile_edge;
        }
        if (!edges[0] && !edges[1]) {
            diag_edge[0] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb - 1)) || left_tile_edge || up_tile_edge;
        }
        if (!edges[1] && !edges[2]) {
            diag_edge[1] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb - 1)) || right_tile_edge || up_tile_edge;
        }
        if (!edges[2] && !edges[3]) {
            diag_edge[2] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb + 1, y_ctb + 1)) || right_tile_edge || bottom_tile_edge;
        }
        if (!edges[0] && !edges[3]) {
            diag_edge[3] = (!lfase && CTB(s->tab_slice_address, x_ctb, y_ctb) != CTB(s->tab_slice_address, x_ctb - 1, y_ctb + 1)) || left_tile_edge || bottom_tile_edge;
        }
    }

    for (c_idx = 0; c_idx < (s->ps.sps->chroma_format_idc ? 3 : 1); c_idx++) {
        int x0       = x >> s->ps.sps->hshift[c_idx];
        int y0       = y >> s->ps.sps->vshift[c_idx];
        ptrdiff_t stride_src = s->frame->linesize[c_idx];
        int ctb_size_h = (1 << (s->ps.sps->log2_ctb_size)) >> s->ps.sps->hshift[c_idx];
        int ctb_size_v = (1 << (s->ps.sps->log2_ctb_size)) >> s->ps.sps->vshift[c_idx];
        int width    = FFMIN(ctb_size_h, (s->ps.sps->width  >> s->ps.sps->hshift[c_idx]) - x0);
        int height   = FFMIN(ctb_size_v, (s->ps.sps->height >> s->ps.sps->vshift[c_idx]) - y0);
        int tab      = sao_tab[(FFALIGN(width, 8) >> 3) - 1];
        uint8_t *src = &s->frame->data[c_idx][y0 * stride_src + (x0 << s->ps.sps->pixel_shift)];
        ptrdiff_t stride_dst;
        uint8_t *dst;

        switch (sao->type_idx[c_idx]) {
        case SAO_BAND:
            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
                           x_ctb, y_ctb);
            if (s->ps.pps->transquant_bypass_enable_flag ||
                (s->ps.sps->pcm.loop_filter_disable_flag && s->ps.sps->pcm_enabled_flag)) {
                dst = lc->edge_emu_buffer;
                stride_dst = 2*MAX_PB_SIZE;
                copy_CTB(dst, src, width << s->ps.sps->pixel_shift, height, stride_dst, stride_src);
                s->hevcdsp.sao_band_filter[tab](src, dst, stride_src, stride_dst,
                                                sao->offset_val[c_idx], sao->band_position[c_idx],
                                                width, height);
                restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
                                   x, y, width, height, c_idx);
            } else {
                s->hevcdsp.sao_band_filter[tab](src, src, stride_src, stride_src,
                                                sao->offset_val[c_idx], sao->band_position[c_idx],
                                                width, height);
            }
            sao->type_idx[c_idx] = SAO_APPLIED;
            break;
        case SAO_EDGE:
        {
            int w = s->ps.sps->width >> s->ps.sps->hshift[c_idx];
            int h = s->ps.sps->height >> s->ps.sps->vshift[c_idx];
            int left_edge = edges[0];
            int top_edge = edges[1];
            int right_edge = edges[2];
            int bottom_edge = edges[3];
            int sh = s->ps.sps->pixel_shift;
            int left_pixels, right_pixels;

            stride_dst = 2*MAX_PB_SIZE + AV_INPUT_BUFFER_PADDING_SIZE;
            dst = lc->edge_emu_buffer + stride_dst + AV_INPUT_BUFFER_PADDING_SIZE;

            if (!top_edge) {
                int left = 1 - left_edge;
                int right = 1 - right_edge;
                const uint8_t *src1[2];
                uint8_t *dst1;
                int src_idx, pos;

                dst1 = dst - stride_dst - (left << sh);
                src1[0] = src - stride_src - (left << sh);
                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb - 1) * w + x0 - left) << sh);
                pos = 0;
                if (left) {
                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb-1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1, src1[src_idx], sh);
                    pos += (1 << sh);
                }
                src_idx = (CTB(s->sao, x_ctb, y_ctb-1).type_idx[c_idx] ==
                           SAO_APPLIED);
                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
                if (right) {
                    pos += width << sh;
                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb-1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
                }
            }
            if (!bottom_edge) {
                int left = 1 - left_edge;
                int right = 1 - right_edge;
                const uint8_t *src1[2];
                uint8_t *dst1;
                int src_idx, pos;

                dst1 = dst + height * stride_dst - (left << sh);
                src1[0] = src + height * stride_src - (left << sh);
                src1[1] = s->sao_pixel_buffer_h[c_idx] + (((2 * y_ctb + 2) * w + x0 - left) << sh);
                pos = 0;
                if (left) {
                    src_idx = (CTB(s->sao, x_ctb-1, y_ctb+1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1, src1[src_idx], sh);
                    pos += (1 << sh);
                }
                src_idx = (CTB(s->sao, x_ctb, y_ctb+1).type_idx[c_idx] ==
                           SAO_APPLIED);
                memcpy(dst1 + pos, src1[src_idx] + pos, width << sh);
                if (right) {
                    pos += width << sh;
                    src_idx = (CTB(s->sao, x_ctb+1, y_ctb+1).type_idx[c_idx] ==
                               SAO_APPLIED);
                    copy_pixel(dst1 + pos, src1[src_idx] + pos, sh);
                }
            }
            left_pixels = 0;
            if (!left_edge) {
                if (CTB(s->sao, x_ctb-1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
                    copy_vert(dst - (1 << sh),
                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb - 1) * h + y0) << sh),
                              sh, height, stride_dst, 1 << sh);
                } else {
                    left_pixels = 1;
                }
            }
            right_pixels = 0;
            if (!right_edge) {
                if (CTB(s->sao, x_ctb+1, y_ctb).type_idx[c_idx] == SAO_APPLIED) {
                    copy_vert(dst + (width << sh),
                              s->sao_pixel_buffer_v[c_idx] + (((2 * x_ctb + 2) * h + y0) << sh),
                              sh, height, stride_dst, 1 << sh);
                } else {
                    right_pixels = 1;
                }
            }

            copy_CTB(dst - (left_pixels << sh),
                     src - (left_pixels << sh),
                     (width + left_pixels + right_pixels) << sh,
                     height, stride_dst, stride_src);

            copy_CTB_to_hv(s, src, stride_src, x0, y0, width, height, c_idx,
                           x_ctb, y_ctb);
            s->hevcdsp.sao_edge_filter[tab](src, dst, stride_src, sao->offset_val[c_idx],
                                            sao->eo_class[c_idx], width, height);
            s->hevcdsp.sao_edge_restore[restore](src, dst,
                                                 stride_src, stride_dst,
                                                 sao,
                                                 edges, width,
                                                 height, c_idx,
                                                 vert_edge,
                                                 horiz_edge,
                                                 diag_edge);
            restore_tqb_pixels(s, src, dst, stride_src, stride_dst,
                               x, y, width, height, c_idx);
            sao->type_idx[c_idx] = SAO_APPLIED;
            break;
        }
        }
    }
}

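/* Return nonzero if the PU covering luma position (x, y) is PCM-coded or
 * transquant-bypassed; out-of-picture positions also report nonzero. */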
static int get_pcm(HEVCContext *s, int x, int y)
{
    int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
    int x_pu, y_pu;

    if (x < 0 || y < 0)
        return 2;

    x_pu = x >> log2_min_pu_size;
    y_pu = y >> log2_min_pu_size;

    if (x_pu >= s->ps.sps->min_pu_width || y_pu >= s->ps.sps->min_pu_height)
        return 2;
    return s->is_pcm[y_pu * s->ps.sps->min_pu_width + x_pu];
}

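/* Luma tc threshold from the QP, the boundary strength and the slice tc
 * offset (rounded to an even value). */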
#define TC_CALC(qp, bs)                                                 \
    tctable[av_clip((qp) + DEFAULT_INTRA_TC_OFFSET * ((bs) - 1) +       \
                    (tc_offset >> 1 << 1),                              \
                    0, MAX_QP + DEFAULT_INTRA_TC_OFFSET)]

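/*
 * Deblock one CTB: luma edges on an 8x8 grid (vertical edges first, then
 * horizontal), followed by chroma edges, which are only filtered where the
 * boundary strength is 2. PCM and transquant-bypass samples are protected
 * through the no_p/no_q flags when pcmf is set.
 */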
static void deblocking_filter_CTB(HEVCContext *s, int x0, int y0)
{
    uint8_t *src;
    int x, y;
    int chroma, beta;
    int32_t c_tc[2], tc[2];
    uint8_t no_p[2] = { 0 };
    uint8_t no_q[2] = { 0 };

    int log2_ctb_size = s->ps.sps->log2_ctb_size;
    int x_end, x_end2, y_end;
    int ctb_size        = 1 << log2_ctb_size;
    int ctb             = (x0 >> log2_ctb_size) +
                          (y0 >> log2_ctb_size) * s->ps.sps->ctb_width;
    int cur_tc_offset   = s->deblock[ctb].tc_offset;
    int cur_beta_offset = s->deblock[ctb].beta_offset;
    int left_tc_offset, left_beta_offset;
    int tc_offset, beta_offset;
    int pcmf = (s->ps.sps->pcm_enabled_flag &&
                s->ps.sps->pcm.loop_filter_disable_flag) ||
               s->ps.pps->transquant_bypass_enable_flag;

    if (x0) {
        left_tc_offset   = s->deblock[ctb - 1].tc_offset;
        left_beta_offset = s->deblock[ctb - 1].beta_offset;
    } else {
        left_tc_offset   = 0;
        left_beta_offset = 0;
    }

    x_end = x0 + ctb_size;
    if (x_end > s->ps.sps->width)
        x_end = s->ps.sps->width;
    y_end = y0 + ctb_size;
    if (y_end > s->ps.sps->height)
        y_end = s->ps.sps->height;

    tc_offset   = cur_tc_offset;
    beta_offset = cur_beta_offset;

    x_end2 = x_end;
    if (x_end2 != s->ps.sps->width)
        x_end2 -= 8;
    for (y = y0; y < y_end; y += 8) {
        // vertical filtering luma
        for (x = x0 ? x0 : 8; x < x_end; x += 8) {
            const int bs0 = s->vertical_bs[(x +  y      * s->bs_width) >> 2];
            const int bs1 = s->vertical_bs[(x + (y + 4) * s->bs_width) >> 2];
            if (bs0 || bs1) {
                const int qp = (get_qPy(s, x - 1, y) + get_qPy(s, x, y) + 1) >> 1;

                beta = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];

                tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
                tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;
                src   = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->ps.sps->pixel_shift)];
                if (pcmf) {
                    no_p[0] = get_pcm(s, x - 1, y);
                    no_p[1] = get_pcm(s, x - 1, y + 4);
                    no_q[0] = get_pcm(s, x, y);
                    no_q[1] = get_pcm(s, x, y + 4);
                    s->hevcdsp.hevc_v_loop_filter_luma_c(src,
                                                         s->frame->linesize[LUMA],
                                                         beta, tc, no_p, no_q);
                } else
                    s->hevcdsp.hevc_v_loop_filter_luma(src,
                                                       s->frame->linesize[LUMA],
                                                       beta, tc, no_p, no_q);
            }
        }

        if (!y)
            continue;

        // horizontal filtering luma
        for (x = x0 ? x0 - 8 : 0; x < x_end2; x += 8) {
            const int bs0 = s->horizontal_bs[( x      + y * s->bs_width) >> 2];
            const int bs1 = s->horizontal_bs[((x + 4) + y * s->bs_width) >> 2];
            if (bs0 || bs1) {
                const int qp = (get_qPy(s, x, y - 1) + get_qPy(s, x, y) + 1) >> 1;

                tc_offset   = x >= x0 ? cur_tc_offset : left_tc_offset;
                beta_offset = x >= x0 ? cur_beta_offset : left_beta_offset;

                beta  = betatable[av_clip(qp + beta_offset, 0, MAX_QP)];
                tc[0] = bs0 ? TC_CALC(qp, bs0) : 0;
                tc[1] = bs1 ? TC_CALC(qp, bs1) : 0;
                src   = &s->frame->data[LUMA][y * s->frame->linesize[LUMA] + (x << s->ps.sps->pixel_shift)];
                if (pcmf) {
                    no_p[0] = get_pcm(s, x, y - 1);
                    no_p[1] = get_pcm(s, x + 4, y - 1);
                    no_q[0] = get_pcm(s, x, y);
                    no_q[1] = get_pcm(s, x + 4, y);
                    s->hevcdsp.hevc_h_loop_filter_luma_c(src,
                                                         s->frame->linesize[LUMA],
                                                         beta, tc, no_p, no_q);
                } else
                    s->hevcdsp.hevc_h_loop_filter_luma(src,
                                                       s->frame->linesize[LUMA],
                                                       beta, tc, no_p, no_q);
            }
        }
    }

    if (s->ps.sps->chroma_format_idc) {
        for (chroma = 1; chroma <= 2; chroma++) {
            int h = 1 << s->ps.sps->hshift[chroma];
            int v = 1 << s->ps.sps->vshift[chroma];

            // vertical filtering chroma
            for (y = y0; y < y_end; y += (8 * v)) {
                for (x = x0 ? x0 : 8 * h; x < x_end; x += (8 * h)) {
                    const int bs0 = s->vertical_bs[(x + y * s->bs_width) >> 2];
                    const int bs1 = s->vertical_bs[(x + (y + (4 * v)) * s->bs_width) >> 2];

                    if ((bs0 == 2) || (bs1 == 2)) {
                        const int qp0 = (get_qPy(s, x - 1, y)           + get_qPy(s, x, y)           + 1) >> 1;
                        const int qp1 = (get_qPy(s, x - 1, y + (4 * v)) + get_qPy(s, x, y + (4 * v)) + 1) >> 1;

                        c_tc[0] = (bs0 == 2) ? chroma_tc(s, qp0, chroma, tc_offset) : 0;
                        c_tc[1] = (bs1 == 2) ? chroma_tc(s, qp1, chroma, tc_offset) : 0;
                        src     = &s->frame->data[chroma][(y >> s->ps.sps->vshift[chroma]) * s->frame->linesize[chroma] + ((x >> s->ps.sps->hshift[chroma]) << s->ps.sps->pixel_shift)];
                        if (pcmf) {
                            no_p[0] = get_pcm(s, x - 1, y);
                            no_p[1] = get_pcm(s, x - 1, y + (4 * v));
                            no_q[0] = get_pcm(s, x, y);
                            no_q[1] = get_pcm(s, x, y + (4 * v));
                            s->hevcdsp.hevc_v_loop_filter_chroma_c(src,
                                                                   s->frame->linesize[chroma],
                                                                   c_tc, no_p, no_q);
                        } else
                            s->hevcdsp.hevc_v_loop_filter_chroma(src,
                                                                 s->frame->linesize[chroma],
                                                                 c_tc, no_p, no_q);
                    }
                }

                if (!y)
                    continue;

                // horizontal filtering chroma
                tc_offset = x0 ? left_tc_offset : cur_tc_offset;
                x_end2 = x_end;
                if (x_end != s->ps.sps->width)
                    x_end2 = x_end - 8 * h;
                for (x = x0 ? x0 - 8 * h : 0; x < x_end2; x += (8 * h)) {
                    const int bs0 = s->horizontal_bs[( x          + y * s->bs_width) >> 2];
                    const int bs1 = s->horizontal_bs[((x + 4 * h) + y * s->bs_width) >> 2];
                    if ((bs0 == 2) || (bs1 == 2)) {
                        const int qp0 = bs0 == 2 ? (get_qPy(s, x,           y - 1) + get_qPy(s, x,           y) + 1) >> 1 : 0;
                        const int qp1 = bs1 == 2 ? (get_qPy(s, x + (4 * h), y - 1) + get_qPy(s, x + (4 * h), y) + 1) >> 1 : 0;

                        c_tc[0] = bs0 == 2 ? chroma_tc(s, qp0, chroma, tc_offset)     : 0;
                        c_tc[1] = bs1 == 2 ? chroma_tc(s, qp1, chroma, cur_tc_offset) : 0;
                        src     = &s->frame->data[chroma][(y >> s->ps.sps->vshift[1]) * s->frame->linesize[chroma] + ((x >> s->ps.sps->hshift[1]) << s->ps.sps->pixel_shift)];
                        if (pcmf) {
                            no_p[0] = get_pcm(s, x,           y - 1);
                            no_p[1] = get_pcm(s, x + (4 * h), y - 1);
                            no_q[0] = get_pcm(s, x,           y);
                            no_q[1] = get_pcm(s, x + (4 * h), y);
                            s->hevcdsp.hevc_h_loop_filter_chroma_c(src,
                                                                   s->frame->linesize[chroma],
                                                                   c_tc, no_p, no_q);
                        } else
                            s->hevcdsp.hevc_h_loop_filter_chroma(src,
                                                                 s->frame->linesize[chroma],
                                                                 c_tc, no_p, no_q);
                    }
                }
            }
        }
    }
}

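/*
 * Boundary strength for an edge between two inter-predicted blocks: 1 if the
 * two sides use different reference pictures, a different number of motion
 * vectors, or motion vectors differing by 4 or more in quarter-sample units,
 * otherwise 0.
 */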
static int boundary_strength(HEVCContext *s, MvField *curr, MvField *neigh,
                             RefPicList *neigh_refPicList)
{
    if (curr->pred_flag == PF_BI && neigh->pred_flag == PF_BI) {
        // same L0 and L1
        if (s->ref->refPicList[0].list[curr->ref_idx[0]] == neigh_refPicList[0].list[neigh->ref_idx[0]]  &&
            s->ref->refPicList[0].list[curr->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]] &&
            neigh_refPicList[0].list[neigh->ref_idx[0]] == neigh_refPicList[1].list[neigh->ref_idx[1]]) {
            if ((FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
                 FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4) &&
                (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
                 FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4))
                return 1;
            else
                return 0;
        } else if (neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
                   neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
            if (FFABS(neigh->mv[0].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[0].y) >= 4 ||
                FFABS(neigh->mv[1].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[1].y) >= 4)
                return 1;
            else
                return 0;
        } else if (neigh_refPicList[1].list[neigh->ref_idx[1]] == s->ref->refPicList[0].list[curr->ref_idx[0]] &&
                   neigh_refPicList[0].list[neigh->ref_idx[0]] == s->ref->refPicList[1].list[curr->ref_idx[1]]) {
            if (FFABS(neigh->mv[1].x - curr->mv[0].x) >= 4 || FFABS(neigh->mv[1].y - curr->mv[0].y) >= 4 ||
                FFABS(neigh->mv[0].x - curr->mv[1].x) >= 4 || FFABS(neigh->mv[0].y - curr->mv[1].y) >= 4)
                return 1;
            else
                return 0;
        } else {
            return 1;
        }
    } else if ((curr->pred_flag != PF_BI) && (neigh->pred_flag != PF_BI)) { // 1 MV
        Mv A, B;
        int ref_A, ref_B;

        if (curr->pred_flag & 1) {
            A     = curr->mv[0];
            ref_A = s->ref->refPicList[0].list[curr->ref_idx[0]];
        } else {
            A     = curr->mv[1];
            ref_A = s->ref->refPicList[1].list[curr->ref_idx[1]];
        }

        if (neigh->pred_flag & 1) {
            B     = neigh->mv[0];
            ref_B = neigh_refPicList[0].list[neigh->ref_idx[0]];
        } else {
            B     = neigh->mv[1];
            ref_B = neigh_refPicList[1].list[neigh->ref_idx[1]];
        }

        if (ref_A == ref_B) {
            if (FFABS(A.x - B.x) >= 4 || FFABS(A.y - B.y) >= 4)
                return 1;
            else
                return 0;
        } else
            return 1;
    }

    return 1;
}

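/*
 * Fill the horizontal_bs/vertical_bs maps for one transform block: strength 2
 * across intra boundaries, 1 when either side has a coded luma residual, and
 * otherwise the inter strength computed by boundary_strength(). Edges on
 * slice/tile boundaries where cross-boundary filtering is disabled are left
 * untouched.
 */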
void ff_hevc_deblocking_boundary_strengths(HEVCContext *s, int x0, int y0,
                                           int log2_trafo_size)
{
    HEVCLocalContext *lc = s->HEVClc;
    MvField *tab_mvf     = s->ref->tab_mvf;
    int log2_min_pu_size = s->ps.sps->log2_min_pu_size;
    int log2_min_tu_size = s->ps.sps->log2_min_tb_size;
    int min_pu_width     = s->ps.sps->min_pu_width;
    int min_tu_width     = s->ps.sps->min_tb_width;
    int is_intra = tab_mvf[(y0 >> log2_min_pu_size) * min_pu_width +
                           (x0 >> log2_min_pu_size)].pred_flag == PF_INTRA;
    int boundary_upper, boundary_left;
    int i, j, bs;

    boundary_upper = y0 > 0 && !(y0 & 7);
    if (boundary_upper &&
        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
          lc->boundary_flags & BOUNDARY_UPPER_SLICE &&
          (y0 % (1 << s->ps.sps->log2_ctb_size)) == 0) ||
         (!s->ps.pps->loop_filter_across_tiles_enabled_flag &&
          lc->boundary_flags & BOUNDARY_UPPER_TILE &&
          (y0 % (1 << s->ps.sps->log2_ctb_size)) == 0)))
        boundary_upper = 0;

    if (boundary_upper) {
        RefPicList *rpl_top = (lc->boundary_flags & BOUNDARY_UPPER_SLICE) ?
                              ff_hevc_get_ref_list(s, s->ref, x0, y0 - 1) :
                              s->ref->refPicList;
        int yp_pu = (y0 - 1) >> log2_min_pu_size;
        int yq_pu =  y0      >> log2_min_pu_size;
        int yp_tu = (y0 - 1) >> log2_min_tu_size;
        int yq_tu =  y0      >> log2_min_tu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int x_pu = (x0 + i) >> log2_min_pu_size;
            int x_tu = (x0 + i) >> log2_min_tu_size;
            MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
            MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];
            uint8_t top_cbf_luma  = s->cbf_luma[yp_tu * min_tu_width + x_tu];
            uint8_t curr_cbf_luma = s->cbf_luma[yq_tu * min_tu_width + x_tu];

            if (curr->pred_flag == PF_INTRA || top->pred_flag == PF_INTRA)
                bs = 2;
            else if (curr_cbf_luma || top_cbf_luma)
                bs = 1;
            else
                bs = boundary_strength(s, curr, top, rpl_top);
            s->horizontal_bs[((x0 + i) + y0 * s->bs_width) >> 2] = bs;
        }
    }

    // bs for vertical TU boundaries
    boundary_left = x0 > 0 && !(x0 & 7);
    if (boundary_left &&
        ((!s->sh.slice_loop_filter_across_slices_enabled_flag &&
          lc->boundary_flags & BOUNDARY_LEFT_SLICE &&
          (x0 % (1 << s->ps.sps->log2_ctb_size)) == 0) ||
         (!s->ps.pps->loop_filter_across_tiles_enabled_flag &&
          lc->boundary_flags & BOUNDARY_LEFT_TILE &&
          (x0 % (1 << s->ps.sps->log2_ctb_size)) == 0)))
        boundary_left = 0;

    if (boundary_left) {
        RefPicList *rpl_left = (lc->boundary_flags & BOUNDARY_LEFT_SLICE) ?
                               ff_hevc_get_ref_list(s, s->ref, x0 - 1, y0) :
                               s->ref->refPicList;
        int xp_pu = (x0 - 1) >> log2_min_pu_size;
        int xq_pu =  x0      >> log2_min_pu_size;
        int xp_tu = (x0 - 1) >> log2_min_tu_size;
        int xq_tu =  x0      >> log2_min_tu_size;

        for (i = 0; i < (1 << log2_trafo_size); i += 4) {
            int y_pu = (y0 + i) >> log2_min_pu_size;
            int y_tu = (y0 + i) >> log2_min_tu_size;
            MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
            MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];
            uint8_t left_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xp_tu];
            uint8_t curr_cbf_luma = s->cbf_luma[y_tu * min_tu_width + xq_tu];

            if (curr->pred_flag == PF_INTRA || left->pred_flag == PF_INTRA)
                bs = 2;
            else if (curr_cbf_luma || left_cbf_luma)
                bs = 1;
            else
                bs = boundary_strength(s, curr, left, rpl_left);
            s->vertical_bs[(x0 + (y0 + i) * s->bs_width) >> 2] = bs;
        }
    }

    if (log2_trafo_size > log2_min_pu_size && !is_intra) {
        RefPicList *rpl = s->ref->refPicList;

        // bs for TU internal horizontal PU boundaries
        for (j = 8; j < (1 << log2_trafo_size); j += 8) {
            int yp_pu = (y0 + j - 1) >> log2_min_pu_size;
            int yq_pu = (y0 + j)     >> log2_min_pu_size;

            for (i = 0; i < (1 << log2_trafo_size); i += 4) {
                int x_pu = (x0 + i) >> log2_min_pu_size;
                MvField *top  = &tab_mvf[yp_pu * min_pu_width + x_pu];
                MvField *curr = &tab_mvf[yq_pu * min_pu_width + x_pu];

                bs = boundary_strength(s, curr, top, rpl);
                s->horizontal_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
            }
        }

        // bs for TU internal vertical PU boundaries
        for (j = 0; j < (1 << log2_trafo_size); j += 4) {
            int y_pu = (y0 + j) >> log2_min_pu_size;

            for (i = 8; i < (1 << log2_trafo_size); i += 8) {
                int xp_pu = (x0 + i - 1) >> log2_min_pu_size;
                int xq_pu = (x0 + i)     >> log2_min_pu_size;
                MvField *left = &tab_mvf[y_pu * min_pu_width + xp_pu];
                MvField *curr = &tab_mvf[y_pu * min_pu_width + xq_pu];

                bs = boundary_strength(s, curr, left, rpl);
                s->vertical_bs[((x0 + i) + (y0 + j) * s->bs_width) >> 2] = bs;
            }
        }
    }
}

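/*
 * Run the in-loop filters for the CTB at (x, y): deblocking on the current
 * CTB, then SAO on the neighbouring CTBs whose surrounding samples are
 * already deblocked, reporting row progress to frame-threading consumers.
 */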
void ff_hevc_hls_filter(HEVCContext *s, int x, int y, int ctb_size)
{
    int x_end = x >= s->ps.sps->width - ctb_size;
    int skip = 0;
    if (s->avctx->skip_loop_filter >= AVDISCARD_ALL ||
        (s->avctx->skip_loop_filter >= AVDISCARD_NONKEY && !IS_IDR(s)) ||
        (s->avctx->skip_loop_filter >= AVDISCARD_NONINTRA &&
         s->sh.slice_type != HEVC_SLICE_I) ||
        (s->avctx->skip_loop_filter >= AVDISCARD_BIDIR &&
         s->sh.slice_type == HEVC_SLICE_B) ||
        (s->avctx->skip_loop_filter >= AVDISCARD_NONREF &&
         ff_hevc_nal_is_nonref(s->nal_unit_type)))
        skip = 1;

    if (!skip)
        deblocking_filter_CTB(s, x, y);
    if (s->ps.sps->sao_enabled && !skip) {
        int y_end = y >= s->ps.sps->height - ctb_size;
        if (y && x)
            sao_filter_CTB(s, x - ctb_size, y - ctb_size);
        if (x && y_end)
            sao_filter_CTB(s, x - ctb_size, y);
        if (y && x_end) {
            sao_filter_CTB(s, x, y - ctb_size);
            if (s->threads_type & FF_THREAD_FRAME)
                ff_thread_report_progress(&s->ref->tf, y, 0);
        }
        if (x_end && y_end) {
            sao_filter_CTB(s, x, y);
            if (s->threads_type & FF_THREAD_FRAME)
                ff_thread_report_progress(&s->ref->tf, y + ctb_size, 0);
        }
    } else if (s->threads_type & FF_THREAD_FRAME && x_end)
        ff_thread_report_progress(&s->ref->tf, y + ctb_size - 4, 0);
}

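/* Trigger ff_hevc_hls_filter() for the neighbours of the CTB at
 * (x_ctb, y_ctb) whose dependencies are complete: above-left always, above at
 * the right picture border, and left at the bottom border. */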
void ff_hevc_hls_filters(HEVCContext *s, int x_ctb, int y_ctb, int ctb_size)
{
    int x_end = x_ctb >= s->ps.sps->width  - ctb_size;
    int y_end = y_ctb >= s->ps.sps->height - ctb_size;
    if (y_ctb && x_ctb)
        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb - ctb_size, ctb_size);
    if (y_ctb && x_end)
        ff_hevc_hls_filter(s, x_ctb, y_ctb - ctb_size, ctb_size);
    if (x_ctb && y_end)
        ff_hevc_hls_filter(s, x_ctb - ctb_size, y_ctb, ctb_size);
}